1 From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:28:03 +0800
4 Subject: [PATCH 37/40] sec: support layerscape
5 This is an integrated patch adding SEC (crypto) support for Layerscape platforms
7 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
8 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
9 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
10 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
11 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
12 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
13 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
14 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
15 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
16 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
17 Signed-off-by: Biwen Li <biwen.li@nxp.com>
21 crypto/tcrypt.c | 27 +-
22 crypto/testmgr.c | 244 ++
23 crypto/testmgr.h | 219 ++
24 crypto/tls.c | 607 +++
25 drivers/crypto/Makefile | 2 +-
26 drivers/crypto/caam/Kconfig | 57 +-
27 drivers/crypto/caam/Makefile | 10 +-
28 drivers/crypto/caam/caamalg.c | 131 +-
29 drivers/crypto/caam/caamalg_desc.c | 761 +++-
30 drivers/crypto/caam/caamalg_desc.h | 47 +-
31 drivers/crypto/caam/caamalg_qi.c | 927 ++++-
32 drivers/crypto/caam/caamalg_qi2.c | 5691 +++++++++++++++++++++++++++
33 drivers/crypto/caam/caamalg_qi2.h | 274 ++
34 drivers/crypto/caam/caamhash.c | 132 +-
35 drivers/crypto/caam/caamhash_desc.c | 108 +
36 drivers/crypto/caam/caamhash_desc.h | 49 +
37 drivers/crypto/caam/compat.h | 2 +
38 drivers/crypto/caam/ctrl.c | 23 +-
39 drivers/crypto/caam/desc.h | 62 +-
40 drivers/crypto/caam/desc_constr.h | 52 +-
41 drivers/crypto/caam/dpseci.c | 865 ++++
42 drivers/crypto/caam/dpseci.h | 433 ++
43 drivers/crypto/caam/dpseci_cmd.h | 287 ++
44 drivers/crypto/caam/error.c | 75 +-
45 drivers/crypto/caam/error.h | 6 +-
46 drivers/crypto/caam/intern.h | 1 +
47 drivers/crypto/caam/jr.c | 42 +
48 drivers/crypto/caam/jr.h | 2 +
49 drivers/crypto/caam/key_gen.c | 30 -
50 drivers/crypto/caam/key_gen.h | 30 +
51 drivers/crypto/caam/qi.c | 85 +-
52 drivers/crypto/caam/qi.h | 2 +-
53 drivers/crypto/caam/regs.h | 2 +
54 drivers/crypto/caam/sg_sw_qm.h | 46 +-
55 drivers/crypto/talitos.c | 8 +
56 37 files changed, 11006 insertions(+), 354 deletions(-)
57 create mode 100644 crypto/tls.c
58 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
59 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
60 create mode 100644 drivers/crypto/caam/caamhash_desc.c
61 create mode 100644 drivers/crypto/caam/caamhash_desc.h
62 create mode 100644 drivers/crypto/caam/dpseci.c
63 create mode 100644 drivers/crypto/caam/dpseci.h
64 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
68 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
69 a sequence number xored with a salt. This is the default
73 + tristate "TLS support"
75 + select CRYPTO_BLKCIPHER
76 + select CRYPTO_MANAGER
79 + select CRYPTO_AUTHENC
81 + Support for TLS 1.0 record encryption and decryption
83 + This module adds support for encryption/decryption of TLS 1.0 frames
84 + using blockcipher algorithms. The name of the resulting algorithm is
85 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
86 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
87 + accelerated versions will be used automatically if available.
89 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
90 + operations through AF_ALG or cryptodev interfaces
97 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
98 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
99 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
100 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
101 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
102 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
103 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
104 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
105 --- a/crypto/tcrypt.c
106 +++ b/crypto/tcrypt.c
107 @@ -76,7 +76,7 @@ static char *check[] = {
108 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
109 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
110 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
115 struct tcrypt_result {
116 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
118 aead_request_set_ad(req, aad_size);
122 ret = test_aead_jiffies(req, enc, *b_size,
127 ret = test_aead_cycles(req, enc, *b_size);
131 pr_err("%s() failed return code=%d\n", e, ret);
132 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
134 ahash_request_set_crypt(req, sg, output, speed[i].plen);
138 ret = test_ahash_jiffies(req, speed[i].blen,
139 speed[i].plen, output, secs);
143 ret = test_ahash_cycles(req, speed[i].blen,
144 speed[i].plen, output);
148 pr_err("hashing failed ret=%d\n", ret);
149 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
151 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
155 ret = test_acipher_jiffies(req, enc,
160 ret = test_acipher_cycles(req, enc,
165 pr_err("%s() failed flags=%x\n", e,
166 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
167 ret += tcrypt_test("hmac(sha3-512)");
171 + ret += tcrypt_test("rsa");
175 ret += tcrypt_test("ansi_cprng");
177 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
179 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
182 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
185 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
186 speed_template_16_24_32);
187 --- a/crypto/testmgr.c
188 +++ b/crypto/testmgr.c
189 @@ -117,6 +117,13 @@ struct drbg_test_suite {
193 +struct tls_test_suite {
195 + struct tls_testvec *vecs;
196 + unsigned int count;
200 struct akcipher_test_suite {
201 const struct akcipher_testvec *vecs;
203 @@ -140,6 +147,7 @@ struct alg_test_desc {
204 struct hash_test_suite hash;
205 struct cprng_test_suite cprng;
206 struct drbg_test_suite drbg;
207 + struct tls_test_suite tls;
208 struct akcipher_test_suite akcipher;
209 struct kpp_test_suite kpp;
211 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
215 +static int __test_tls(struct crypto_aead *tfm, int enc,
216 + struct tls_testvec *template, unsigned int tcount,
217 + const bool diff_dst)
219 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
220 + unsigned int i, k, authsize;
222 + struct aead_request *req;
223 + struct scatterlist *sg;
224 + struct scatterlist *sgout;
226 + struct tcrypt_result result;
232 + char *xbuf[XBUFSIZE];
233 + char *xoutbuf[XBUFSIZE];
234 + char *axbuf[XBUFSIZE];
237 + if (testmgr_alloc_buf(xbuf))
240 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
243 + if (testmgr_alloc_buf(axbuf))
246 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
250 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
254 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
260 + d = diff_dst ? "-ddst" : "";
261 + e = enc ? "encryption" : "decryption";
263 + init_completion(&result.completion);
265 + req = aead_request_alloc(tfm, GFP_KERNEL);
267 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
272 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
273 + tcrypt_complete, &result);
275 + for (i = 0; i < tcount; i++) {
280 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
281 + template[i].alen > PAGE_SIZE))
284 + memcpy(assoc, template[i].assoc, template[i].alen);
285 + memcpy(input, template[i].input, template[i].ilen);
287 + if (template[i].iv)
288 + memcpy(iv, template[i].iv, MAX_IVLEN);
290 + memset(iv, 0, MAX_IVLEN);
292 + crypto_aead_clear_flags(tfm, ~0);
294 + if (template[i].klen > MAX_KEYLEN) {
295 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
296 + d, i, algo, template[i].klen, MAX_KEYLEN);
300 + memcpy(key, template[i].key, template[i].klen);
302 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
303 + if (!ret == template[i].fail) {
304 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
305 + d, i, algo, crypto_aead_get_flags(tfm));
311 + ret = crypto_aead_setauthsize(tfm, authsize);
313 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
314 + d, authsize, i, algo);
318 + k = !!template[i].alen;
319 + sg_init_table(sg, k + 1);
320 + sg_set_buf(&sg[0], assoc, template[i].alen);
321 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
322 + template[i].ilen));
326 + sg_init_table(sgout, k + 1);
327 + sg_set_buf(&sgout[0], assoc, template[i].alen);
329 + output = xoutbuf[0];
330 + sg_set_buf(&sgout[k], output,
331 + (enc ? template[i].rlen : template[i].ilen));
334 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
335 + template[i].ilen, iv);
337 + aead_request_set_ad(req, template[i].alen);
339 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
343 + if (template[i].novrfy) {
344 + /* verification was supposed to fail */
345 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
347 + /* so really, we got a bad message */
354 + wait_for_completion(&result.completion);
355 + reinit_completion(&result.completion);
360 + /* verification failure was expected */
361 + if (template[i].novrfy)
365 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
366 + d, e, i, algo, -ret);
371 + if (memcmp(q, template[i].result, template[i].rlen)) {
372 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
374 + hexdump(q, template[i].rlen);
375 + pr_err("should be:\n");
376 + hexdump(template[i].result, template[i].rlen);
383 + aead_request_free(req);
391 + testmgr_free_buf(axbuf);
394 + testmgr_free_buf(xoutbuf);
396 + testmgr_free_buf(xbuf);
401 +static int test_tls(struct crypto_aead *tfm, int enc,
402 + struct tls_testvec *template, unsigned int tcount)
405 + /* test 'dst == src' case */
406 + ret = __test_tls(tfm, enc, template, tcount, false);
409 + /* test 'dst != src' case */
410 + return __test_tls(tfm, enc, template, tcount, true);
413 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
414 + u32 type, u32 mask)
416 + struct crypto_aead *tfm;
419 + tfm = crypto_alloc_aead(driver, type, mask);
421 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
422 + driver, PTR_ERR(tfm));
423 + return PTR_ERR(tfm);
426 + if (desc->suite.tls.enc.vecs) {
427 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
428 + desc->suite.tls.enc.count);
433 + if (!err && desc->suite.tls.dec.vecs)
434 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
435 + desc->suite.tls.dec.count);
438 + crypto_free_aead(tfm);
442 static int test_cipher(struct crypto_cipher *tfm, int enc,
443 const struct cipher_testvec *template,
445 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
446 .hash = __VECS(tgr192_tv_template)
449 + .alg = "tls10(hmac(sha1),cbc(aes))",
450 + .test = alg_test_tls,
453 + .enc = __VECS(tls_enc_tv_template),
454 + .dec = __VECS(tls_dec_tv_template)
459 .test = alg_test_hash,
461 --- a/crypto/testmgr.h
462 +++ b/crypto/testmgr.h
463 @@ -125,6 +125,20 @@ struct drbg_testvec {
467 +struct tls_testvec {
468 + char *key; /* wrapped keys for encryption and authentication */
469 + char *iv; /* initialization vector */
470 + char *input; /* input data */
471 + char *assoc; /* associated data: seq num, type, version, input len */
472 + char *result; /* result data */
473 + unsigned char fail; /* the test failure is expected */
474 + unsigned char novrfy; /* dec verification failure expected */
475 + unsigned char klen; /* key length */
476 + unsigned short ilen; /* input data length */
477 + unsigned short alen; /* associated data length */
478 + unsigned short rlen; /* result length */
481 struct akcipher_testvec {
482 const unsigned char *key;
483 const unsigned char *m;
484 @@ -153,6 +167,211 @@ struct kpp_testvec {
485 static const char zeroed_string[48];
488 + * TLS1.0 synthetic test vectors
490 +static struct tls_testvec tls_enc_tv_template[] = {
492 +#ifdef __LITTLE_ENDIAN
493 + .key = "\x08\x00" /* rta length */
494 + "\x01\x00" /* rta type */
496 + .key = "\x00\x08" /* rta length */
497 + "\x00\x01" /* rta type */
499 + "\x00\x00\x00\x10" /* enc key length */
500 + "authenticationkey20benckeyis16_bytes",
501 + .klen = 8 + 20 + 16,
502 + .iv = "iv0123456789abcd",
503 + .input = "Single block msg",
505 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
506 + "\x00\x03\x01\x00\x10",
508 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
509 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
510 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
511 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
512 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
513 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
514 + .rlen = 16 + 20 + 12,
516 +#ifdef __LITTLE_ENDIAN
517 + .key = "\x08\x00" /* rta length */
518 + "\x01\x00" /* rta type */
520 + .key = "\x00\x08" /* rta length */
521 + "\x00\x01" /* rta type */
523 + "\x00\x00\x00\x10" /* enc key length */
524 + "authenticationkey20benckeyis16_bytes",
525 + .klen = 8 + 20 + 16,
526 + .iv = "iv0123456789abcd",
529 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
530 + "\x00\x03\x01\x00\x00",
532 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
533 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
534 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
535 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
538 +#ifdef __LITTLE_ENDIAN
539 + .key = "\x08\x00" /* rta length */
540 + "\x01\x00" /* rta type */
542 + .key = "\x00\x08" /* rta length */
543 + "\x00\x01" /* rta type */
545 + "\x00\x00\x00\x10" /* enc key length */
546 + "authenticationkey20benckeyis16_bytes",
547 + .klen = 8 + 20 + 16,
548 + .iv = "iv0123456789abcd",
549 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
550 + " plaintext285 bytes plaintext285 bytes plaintext285"
551 + " bytes plaintext285 bytes plaintext285 bytes"
552 + " plaintext285 bytes plaintext285 bytes plaintext285"
553 + " bytes plaintext285 bytes plaintext285 bytes"
554 + " plaintext285 bytes plaintext285 bytes plaintext285"
555 + " bytes plaintext285 bytes plaintext",
557 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
558 + "\x00\x03\x01\x01\x1d",
560 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
561 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
562 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
563 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
564 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
565 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
566 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
567 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
568 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
569 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
570 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
571 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
572 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
573 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
574 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
575 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
576 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
577 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
578 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
579 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
580 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
581 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
582 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
583 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
584 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
585 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
586 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
587 + .rlen = 285 + 20 + 15,
591 +static struct tls_testvec tls_dec_tv_template[] = {
593 +#ifdef __LITTLE_ENDIAN
594 + .key = "\x08\x00" /* rta length */
595 + "\x01\x00" /* rta type */
597 + .key = "\x00\x08" /* rta length */
598 + "\x00\x01" /* rta type */
600 + "\x00\x00\x00\x10" /* enc key length */
601 + "authenticationkey20benckeyis16_bytes",
602 + .klen = 8 + 20 + 16,
603 + .iv = "iv0123456789abcd",
604 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
605 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
606 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
607 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
608 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
609 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
610 + .ilen = 16 + 20 + 12,
611 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
612 + "\x00\x03\x01\x00\x30",
614 + .result = "Single block msg",
617 +#ifdef __LITTLE_ENDIAN
618 + .key = "\x08\x00" /* rta length */
619 + "\x01\x00" /* rta type */
621 + .key = "\x00\x08" /* rta length */
622 + "\x00\x01" /* rta type */
624 + "\x00\x00\x00\x10" /* enc key length */
625 + "authenticationkey20benckeyis16_bytes",
626 + .klen = 8 + 20 + 16,
627 + .iv = "iv0123456789abcd",
628 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
629 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
630 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
631 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
633 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
634 + "\x00\x03\x01\x00\x20",
639 +#ifdef __LITTLE_ENDIAN
640 + .key = "\x08\x00" /* rta length */
641 + "\x01\x00" /* rta type */
643 + .key = "\x00\x08" /* rta length */
644 + "\x00\x01" /* rta type */
646 + "\x00\x00\x00\x10" /* enc key length */
647 + "authenticationkey20benckeyis16_bytes",
648 + .klen = 8 + 20 + 16,
649 + .iv = "iv0123456789abcd",
650 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
651 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
652 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
653 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
654 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
655 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
656 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
657 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
658 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
659 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
660 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
661 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
662 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
663 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
664 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
665 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
666 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
667 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
668 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
669 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
670 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
671 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
672 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
673 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
674 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
675 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
676 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
678 + .ilen = 285 + 20 + 15,
679 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
680 + "\x00\x03\x01\x01\x40",
682 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
683 + " plaintext285 bytes plaintext285 bytes plaintext285"
684 + " bytes plaintext285 bytes plaintext285 bytes"
685 + " plaintext285 bytes plaintext285 bytes plaintext285"
686 + " bytes plaintext285 bytes plaintext285 bytes"
687 + " plaintext285 bytes plaintext285 bytes plaintext",
693 * RSA test vectors. Borrowed from openSSL.
695 static const struct akcipher_testvec rsa_tv_template[] = {
700 + * Copyright 2013 Freescale Semiconductor, Inc.
701 + * Copyright 2017 NXP Semiconductor, Inc.
703 + * This program is free software; you can redistribute it and/or modify it
704 + * under the terms of the GNU General Public License as published by the Free
705 + * Software Foundation; either version 2 of the License, or (at your option)
706 + * any later version.
710 +#include <crypto/internal/aead.h>
711 +#include <crypto/internal/hash.h>
712 +#include <crypto/internal/skcipher.h>
713 +#include <crypto/authenc.h>
714 +#include <crypto/null.h>
715 +#include <crypto/scatterwalk.h>
716 +#include <linux/err.h>
717 +#include <linux/init.h>
718 +#include <linux/module.h>
719 +#include <linux/rtnetlink.h>
721 +struct tls_instance_ctx {
722 + struct crypto_ahash_spawn auth;
723 + struct crypto_skcipher_spawn enc;
726 +struct crypto_tls_ctx {
727 + unsigned int reqoff;
728 + struct crypto_ahash *auth;
729 + struct crypto_skcipher *enc;
730 + struct crypto_skcipher *null;
733 +struct tls_request_ctx {
735 + * cryptlen holds the payload length in the case of encryption or
736 + * payload_len + icv_len + padding_len in case of decryption
738 + unsigned int cryptlen;
739 + /* working space for partial results */
740 + struct scatterlist tmp[2];
741 + struct scatterlist cipher[2];
742 + struct scatterlist dst[2];
747 + struct completion completion;
751 +static void tls_async_op_done(struct crypto_async_request *req, int err)
753 + struct async_op *areq = req->data;
755 + if (err == -EINPROGRESS)
759 + complete(&areq->completion);
762 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
763 + unsigned int keylen)
765 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
766 + struct crypto_ahash *auth = ctx->auth;
767 + struct crypto_skcipher *enc = ctx->enc;
768 + struct crypto_authenc_keys keys;
771 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
774 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
775 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
776 + CRYPTO_TFM_REQ_MASK);
777 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
778 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
779 + CRYPTO_TFM_RES_MASK);
784 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
785 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
786 + CRYPTO_TFM_REQ_MASK);
787 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
788 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
789 + CRYPTO_TFM_RES_MASK);
795 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
800 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
801 + * @hash: (output) buffer to save the digest into
802 + * @src: (input) scatterlist with the assoc and payload data
803 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
804 + * @req: (input) aead request
806 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
807 + unsigned int srclen, struct aead_request *req)
809 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
810 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
811 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
812 + struct async_op ahash_op;
813 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
814 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
815 + int err = -EBADMSG;
817 + /* Bail out if the request assoc len is 0 */
818 + if (!req->assoclen)
821 + init_completion(&ahash_op.completion);
823 + /* the hash transform to be executed comes from the original request */
824 + ahash_request_set_tfm(ahreq, ctx->auth);
825 + /* prepare the hash request with input data and result pointer */
826 + ahash_request_set_crypt(ahreq, src, hash, srclen);
827 + /* set the notifier for when the async hash function returns */
828 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
829 + tls_async_op_done, &ahash_op);
831 + /* Calculate the digest on the given data. The result is put in hash */
832 + err = crypto_ahash_digest(ahreq);
833 + if (err == -EINPROGRESS) {
834 + err = wait_for_completion_interruptible(&ahash_op.completion);
836 + err = ahash_op.err;
843 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
844 + * @hash: (output) buffer to save the digest and padding into
845 + * @phashlen: (output) the size of digest + padding
846 + * @req: (input) aead request
848 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
849 + struct aead_request *req)
851 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
852 + unsigned int hash_size = crypto_aead_authsize(tls);
853 + unsigned int block_size = crypto_aead_blocksize(tls);
854 + unsigned int srclen = req->cryptlen + hash_size;
855 + unsigned int icvlen = req->cryptlen + req->assoclen;
856 + unsigned int padlen;
859 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
863 + /* add padding after digest */
864 + padlen = block_size - (srclen % block_size);
865 + memset(hash + hash_size, padlen - 1, padlen);
867 + *phashlen = hash_size + padlen;
872 +static int crypto_tls_copy_data(struct aead_request *req,
873 + struct scatterlist *src,
874 + struct scatterlist *dst,
877 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
878 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
879 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
881 + skcipher_request_set_tfm(skreq, ctx->null);
882 + skcipher_request_set_callback(skreq, aead_request_flags(req),
884 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
886 + return crypto_skcipher_encrypt(skreq);
889 +static int crypto_tls_encrypt(struct aead_request *req)
891 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
892 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
893 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
894 + struct skcipher_request *skreq;
895 + struct scatterlist *cipher = treq_ctx->cipher;
896 + struct scatterlist *tmp = treq_ctx->tmp;
897 + struct scatterlist *sg, *src, *dst;
898 + unsigned int cryptlen, phashlen;
899 + u8 *hash = treq_ctx->tail;
903 + * The hash result is saved at the beginning of the tls request ctx
904 + * and is aligned as required by the hash transform. Enough space was
905 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
906 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
907 + * the result is not overwritten by the second (cipher) request.
909 + hash = (u8 *)ALIGN((unsigned long)hash +
910 + crypto_ahash_alignmask(ctx->auth),
911 + crypto_ahash_alignmask(ctx->auth) + 1);
914 + * STEP 1: create ICV together with necessary padding
916 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
921 + * STEP 2: Hash and padding are combined with the payload
922 + * depending on the form it arrives. Scatter tables must have at least
923 + * one page of data before chaining with another table and can't have
924 + * an empty data page. The following code addresses these requirements.
926 + * If the payload is empty, only the hash is encrypted, otherwise the
927 + * payload scatterlist is merged with the hash. A special merging case
928 + * is when the payload has only one page of data. In that case the
929 + * payload page is moved to another scatterlist and prepared there for
932 + if (req->cryptlen) {
933 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
935 + sg_init_table(cipher, 2);
936 + sg_set_buf(cipher + 1, hash, phashlen);
938 + if (sg_is_last(src)) {
939 + sg_set_page(cipher, sg_page(src), req->cryptlen,
943 + unsigned int rem_len = req->cryptlen;
945 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
946 + rem_len -= min(rem_len, sg->length);
948 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
949 + sg_chain(sg, 1, cipher);
952 + sg_init_one(cipher, hash, phashlen);
957 + * If src != dst copy the associated data from source to destination.
958 + * In both cases fast-forward passed the associated data in the dest.
960 + if (req->src != req->dst) {
961 + err = crypto_tls_copy_data(req, req->src, req->dst,
966 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
969 + * STEP 3: encrypt the frame and return the result
971 + cryptlen = req->cryptlen + phashlen;
974 + * The hash and the cipher are applied at different times and their
975 + * requests can use the same memory space without interference
977 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
978 + skcipher_request_set_tfm(skreq, ctx->enc);
979 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
980 + skcipher_request_set_callback(skreq, aead_request_flags(req),
981 + req->base.complete, req->base.data);
983 + * Apply the cipher transform. The result will be in req->dst when the
984 + * asynchronuous call terminates
986 + err = crypto_skcipher_encrypt(skreq);
991 +static int crypto_tls_decrypt(struct aead_request *req)
993 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
994 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
995 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
996 + unsigned int cryptlen = req->cryptlen;
997 + unsigned int hash_size = crypto_aead_authsize(tls);
998 + unsigned int block_size = crypto_aead_blocksize(tls);
999 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1000 + struct scatterlist *tmp = treq_ctx->tmp;
1001 + struct scatterlist *src, *dst;
1003 + u8 padding[255]; /* padding can be 0-255 bytes */
1006 + u8 *ihash, *hash = treq_ctx->tail;
1009 + int err = -EINVAL;
1011 + struct async_op ciph_op;
1014 + * Rule out bad packets. The input packet length must be at least one
1015 + * byte more than the hash_size
1017 + if (cryptlen <= hash_size || cryptlen % block_size)
1021 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1022 + * to the encrypted data. The result will be overwritten in place so
1023 + * that the decrypted data will be adjacent to the associated data. The
1024 + * last step (computing the hash) will have it's input data already
1025 + * prepared and ready to be accessed at req->src.
1027 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1030 + init_completion(&ciph_op.completion);
1031 + skcipher_request_set_tfm(skreq, ctx->enc);
1032 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1033 + tls_async_op_done, &ciph_op);
1034 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1035 + err = crypto_skcipher_decrypt(skreq);
1036 + if (err == -EINPROGRESS) {
1037 + err = wait_for_completion_interruptible(&ciph_op.completion);
1039 + err = ciph_op.err;
1045 + * Step 2 - Verify padding
1046 + * Retrieve the last byte of the payload; this is the padding size.
1049 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1051 + /* RFC recommendation for invalid padding size. */
1052 + if (cryptlen < pad_size + hash_size) {
1054 + paderr = -EBADMSG;
1056 + cryptlen -= pad_size;
1057 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1059 + /* Padding content must be equal with pad_size. We verify it all */
1060 + for (i = 0; i < pad_size; i++)
1061 + if (padding[i] != pad_size)
1062 + paderr = -EBADMSG;
1065 + * Step 3 - Verify hash
1066 + * Align the digest result as required by the hash transform. Enough
1067 + * space was allocated in crypto_tls_init_tfm
1069 + hash = (u8 *)ALIGN((unsigned long)hash +
1070 + crypto_ahash_alignmask(ctx->auth),
1071 + crypto_ahash_alignmask(ctx->auth) + 1);
1073 + * Two bytes at the end of the associated data make the length field.
1074 + * It must be updated with the length of the cleartext message before
1075 + * the hash is calculated.
1077 + len_field = sg_virt(req->src) + req->assoclen - 2;
1078 + cryptlen -= hash_size;
1079 + *len_field = htons(cryptlen);
1081 + /* This is the hash from the decrypted packet. Save it for later */
1082 + ihash = hash + hash_size;
1083 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1085 + /* Now compute and compare our ICV with the one from the packet */
1086 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1088 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1090 + if (req->src != req->dst) {
1091 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1097 + /* return the first found error */
1102 + aead_request_complete(req, err);
1106 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1108 + struct aead_instance *inst = aead_alg_instance(tfm);
1109 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1110 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1111 + struct crypto_ahash *auth;
1112 + struct crypto_skcipher *enc;
1113 + struct crypto_skcipher *null;
1116 + auth = crypto_spawn_ahash(&ictx->auth);
1118 + return PTR_ERR(auth);
1120 + enc = crypto_spawn_skcipher(&ictx->enc);
1121 + err = PTR_ERR(enc);
1123 + goto err_free_ahash;
1125 + null = crypto_get_default_null_skcipher2();
1126 + err = PTR_ERR(null);
1128 + goto err_free_skcipher;
1135 + * Allow enough space for two digests. The two digests will be compared
1136 + * during the decryption phase. One will come from the decrypted packet
1137 + * and the other will be calculated. For encryption, one digest is
1138 + * padded (up to a cipher blocksize) and chained with the payload
1140 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1141 + crypto_ahash_alignmask(auth),
1142 + crypto_ahash_alignmask(auth) + 1) +
1143 + max(crypto_ahash_digestsize(auth),
1144 + crypto_skcipher_blocksize(enc));
1146 + crypto_aead_set_reqsize(tfm,
1147 + sizeof(struct tls_request_ctx) +
1149 + max_t(unsigned int,
1150 + crypto_ahash_reqsize(auth) +
1151 + sizeof(struct ahash_request),
1152 + crypto_skcipher_reqsize(enc) +
1153 + sizeof(struct skcipher_request)));
1158 + crypto_free_skcipher(enc);
1160 + crypto_free_ahash(auth);
1164 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1166 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1168 + crypto_free_ahash(ctx->auth);
1169 + crypto_free_skcipher(ctx->enc);
1170 + crypto_put_default_null_skcipher2();
1173 +static void crypto_tls_free(struct aead_instance *inst)
1175 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1177 + crypto_drop_skcipher(&ctx->enc);
1178 + crypto_drop_ahash(&ctx->auth);
1182 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1184 + struct crypto_attr_type *algt;
1185 + struct aead_instance *inst;
1186 + struct hash_alg_common *auth;
1187 + struct crypto_alg *auth_base;
1188 + struct skcipher_alg *enc;
1189 + struct tls_instance_ctx *ctx;
1190 + const char *enc_name;
1193 + algt = crypto_get_attr_type(tb);
1195 + return PTR_ERR(algt);
1197 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1200 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1201 + CRYPTO_ALG_TYPE_AHASH_MASK |
1202 + crypto_requires_sync(algt->type, algt->mask));
1204 + return PTR_ERR(auth);
1206 + auth_base = &auth->base;
1208 + enc_name = crypto_attr_alg_name(tb[2]);
1209 + err = PTR_ERR(enc_name);
1210 + if (IS_ERR(enc_name))
1211 + goto out_put_auth;
1213 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1216 + goto out_put_auth;
1218 + ctx = aead_instance_ctx(inst);
1220 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1221 + aead_crypto_instance(inst));
1223 + goto err_free_inst;
1225 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1226 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1227 + crypto_requires_sync(algt->type,
1230 + goto err_drop_auth;
1232 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1234 + err = -ENAMETOOLONG;
1235 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1236 + "tls10(%s,%s)", auth_base->cra_name,
1237 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1238 + goto err_drop_enc;
1240 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1241 + "tls10(%s,%s)", auth_base->cra_driver_name,
1242 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1243 + goto err_drop_enc;
1245 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1246 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1247 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1248 + auth_base->cra_priority;
1249 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1250 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1251 + enc->base.cra_alignmask;
1252 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1254 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1255 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1256 + inst->alg.maxauthsize = auth->digestsize;
1258 + inst->alg.init = crypto_tls_init_tfm;
1259 + inst->alg.exit = crypto_tls_exit_tfm;
1261 + inst->alg.setkey = crypto_tls_setkey;
1262 + inst->alg.encrypt = crypto_tls_encrypt;
1263 + inst->alg.decrypt = crypto_tls_decrypt;
1265 + inst->free = crypto_tls_free;
1267 + err = aead_register_instance(tmpl, inst);
1269 + goto err_drop_enc;
1272 + crypto_mod_put(auth_base);
1276 + crypto_drop_skcipher(&ctx->enc);
1278 + crypto_drop_ahash(&ctx->auth);
1285 +static struct crypto_template crypto_tls_tmpl = {
1287 + .create = crypto_tls_create,
1288 + .module = THIS_MODULE,
1291 +static int __init crypto_tls_module_init(void)
1293 + return crypto_register_template(&crypto_tls_tmpl);
1296 +static void __exit crypto_tls_module_exit(void)
1298 + crypto_unregister_template(&crypto_tls_tmpl);
1301 +module_init(crypto_tls_module_init);
1302 +module_exit(crypto_tls_module_exit);
1304 +MODULE_LICENSE("GPL");
1305 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
1306 --- a/drivers/crypto/Makefile
1307 +++ b/drivers/crypto/Makefile
1308 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1309 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1310 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1311 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1312 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1313 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1314 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1315 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1316 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1317 --- a/drivers/crypto/caam/Kconfig
1318 +++ b/drivers/crypto/caam/Kconfig
1320 +config CRYPTO_DEV_FSL_CAAM_COMMON
1323 config CRYPTO_DEV_FSL_CAAM
1324 - tristate "Freescale CAAM-Multicore driver backend"
1325 + tristate "Freescale CAAM-Multicore platform driver backend"
1326 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1328 + select CRYPTO_DEV_FSL_CAAM_COMMON
1330 Enables the driver module for Freescale's Cryptographic Accelerator
1331 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1332 @@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
1333 To compile this driver as a module, choose M here: the module
1334 will be called caam.
1336 +if CRYPTO_DEV_FSL_CAAM
1338 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1339 + bool "Enable debug output in CAAM driver"
1341 + Selecting this will enable printing of various debug
1342 + information in the CAAM driver.
1344 config CRYPTO_DEV_FSL_CAAM_JR
1345 tristate "Freescale CAAM Job Ring driver backend"
1346 - depends on CRYPTO_DEV_FSL_CAAM
1349 Enables the driver module for Job Rings which are part of
1350 @@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1351 To compile this driver as a module, choose M here: the module
1352 will be called caam_jr.
1354 +if CRYPTO_DEV_FSL_CAAM_JR
1356 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1358 - depends on CRYPTO_DEV_FSL_CAAM_JR
1362 @@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1364 config CRYPTO_DEV_FSL_CAAM_INTC
1365 bool "Job Ring interrupt coalescing"
1366 - depends on CRYPTO_DEV_FSL_CAAM_JR
1368 Enable the Job Ring's interrupt coalescing feature.
1370 @@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1372 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1373 tristate "Register algorithm implementations with the Crypto API"
1374 - depends on CRYPTO_DEV_FSL_CAAM_JR
1377 select CRYPTO_AUTHENC
1378 @@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1380 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1381 tristate "Queue Interface as Crypto API backend"
1382 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1383 + depends on FSL_SDK_DPA && NET
1385 select CRYPTO_AUTHENC
1386 select CRYPTO_BLKCIPHER
1387 @@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1389 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1390 tristate "Register hash algorithm implementations with Crypto API"
1391 - depends on CRYPTO_DEV_FSL_CAAM_JR
1395 @@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
1397 config CRYPTO_DEV_FSL_CAAM_PKC_API
1398 tristate "Register public key cryptography implementations with Crypto API"
1399 - depends on CRYPTO_DEV_FSL_CAAM_JR
1403 @@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
1405 config CRYPTO_DEV_FSL_CAAM_RNG_API
1406 tristate "Register caam device for hwrng API"
1407 - depends on CRYPTO_DEV_FSL_CAAM_JR
1411 @@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1412 To compile this as a module, choose M here: the module
1413 will be called caamrng.
1415 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1416 - bool "Enable debug output in CAAM driver"
1417 - depends on CRYPTO_DEV_FSL_CAAM
1419 - Selecting this will enable printing of various debug
1420 - information in the CAAM driver.
1421 +endif # CRYPTO_DEV_FSL_CAAM_JR
1423 +endif # CRYPTO_DEV_FSL_CAAM
1425 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1426 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1427 + depends on FSL_MC_DPIO
1428 + select CRYPTO_DEV_FSL_CAAM_COMMON
1429 + select CRYPTO_BLKCIPHER
1430 + select CRYPTO_AUTHENC
1431 + select CRYPTO_AEAD
1432 + select CRYPTO_HASH
1434 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1435 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1438 + To compile this as a module, choose M here: the module
1439 + will be called dpaa2_caam.
1441 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1442 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1443 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1444 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
1445 + CRYPTO_DEV_FSL_DPAA2_CAAM)
1447 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1448 + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
1449 + CRYPTO_DEV_FSL_DPAA2_CAAM)
1450 --- a/drivers/crypto/caam/Makefile
1451 +++ b/drivers/crypto/caam/Makefile
1452 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1453 ccflags-y := -DDEBUG
1456 +ccflags-y += -DVERSION=\"\"
1458 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1459 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1460 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1461 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1462 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1463 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1464 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1465 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1466 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1467 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1470 -caam_jr-objs := jr.o key_gen.o error.o
1471 +caam_jr-objs := jr.o key_gen.o
1472 caam_pkc-y := caampkc.o pkc_desc.o
1473 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1474 ccflags-y += -DCONFIG_CAAM_QI
1478 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1480 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1481 --- a/drivers/crypto/caam/caamalg.c
1482 +++ b/drivers/crypto/caam/caamalg.c
1483 @@ -108,6 +108,7 @@ struct caam_ctx {
1484 dma_addr_t sh_desc_dec_dma;
1485 dma_addr_t sh_desc_givenc_dma;
1487 + enum dma_data_direction dir;
1488 struct device *jrdev;
1489 struct alginfo adata;
1490 struct alginfo cdata;
1491 @@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
1493 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1494 struct device *jrdev = ctx->jrdev;
1495 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1497 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1498 ctx->adata.keylen_pad;
1499 @@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
1501 /* aead_encrypt shared descriptor */
1502 desc = ctx->sh_desc_enc;
1503 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1504 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1506 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1507 - desc_bytes(desc), DMA_TO_DEVICE);
1508 + desc_bytes(desc), ctx->dir);
1511 * Job Descriptor and Shared Descriptors
1512 @@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
1514 /* aead_decrypt shared descriptor */
1515 desc = ctx->sh_desc_dec;
1516 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1517 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1519 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1520 - desc_bytes(desc), DMA_TO_DEVICE);
1521 + desc_bytes(desc), ctx->dir);
1525 @@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
1526 unsigned int ivsize = crypto_aead_ivsize(aead);
1527 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1528 struct device *jrdev = ctx->jrdev;
1529 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1530 u32 ctx1_iv_off = 0;
1531 u32 *desc, *nonce = NULL;
1533 @@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
1534 desc = ctx->sh_desc_enc;
1535 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1536 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1538 + false, ctrlpriv->era);
1539 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1540 - desc_bytes(desc), DMA_TO_DEVICE);
1541 + desc_bytes(desc), ctx->dir);
1545 @@ -266,9 +271,9 @@ skip_enc:
1546 desc = ctx->sh_desc_dec;
1547 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1548 ctx->authsize, alg->caam.geniv, is_rfc3686,
1549 - nonce, ctx1_iv_off, false);
1550 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1551 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1552 - desc_bytes(desc), DMA_TO_DEVICE);
1553 + desc_bytes(desc), ctx->dir);
1555 if (!alg->caam.geniv)
1557 @@ -300,9 +305,9 @@ skip_enc:
1558 desc = ctx->sh_desc_enc;
1559 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1560 ctx->authsize, is_rfc3686, nonce,
1561 - ctx1_iv_off, false);
1562 + ctx1_iv_off, false, ctrlpriv->era);
1563 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1564 - desc_bytes(desc), DMA_TO_DEVICE);
1565 + desc_bytes(desc), ctx->dir);
1569 @@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
1571 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1572 struct device *jrdev = ctx->jrdev;
1573 + unsigned int ivsize = crypto_aead_ivsize(aead);
1575 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1577 @@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
1580 desc = ctx->sh_desc_enc;
1581 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1582 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1583 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1584 - desc_bytes(desc), DMA_TO_DEVICE);
1585 + desc_bytes(desc), ctx->dir);
1588 * Job Descriptor and Shared Descriptors
1589 @@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
1592 desc = ctx->sh_desc_dec;
1593 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1594 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1595 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1596 - desc_bytes(desc), DMA_TO_DEVICE);
1597 + desc_bytes(desc), ctx->dir);
1601 @@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
1603 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1604 struct device *jrdev = ctx->jrdev;
1605 + unsigned int ivsize = crypto_aead_ivsize(aead);
1607 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1609 @@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
1612 desc = ctx->sh_desc_enc;
1613 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1614 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1616 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1617 - desc_bytes(desc), DMA_TO_DEVICE);
1618 + desc_bytes(desc), ctx->dir);
1621 * Job Descriptor and Shared Descriptors
1622 @@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
1625 desc = ctx->sh_desc_dec;
1626 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1627 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1629 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1630 - desc_bytes(desc), DMA_TO_DEVICE);
1631 + desc_bytes(desc), ctx->dir);
1635 @@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
1637 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1638 struct device *jrdev = ctx->jrdev;
1639 + unsigned int ivsize = crypto_aead_ivsize(aead);
1641 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1643 @@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
1646 desc = ctx->sh_desc_enc;
1647 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1648 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1650 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1651 - desc_bytes(desc), DMA_TO_DEVICE);
1652 + desc_bytes(desc), ctx->dir);
1655 * Job Descriptor and Shared Descriptors
1656 @@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
1659 desc = ctx->sh_desc_dec;
1660 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1661 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1663 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1664 - desc_bytes(desc), DMA_TO_DEVICE);
1665 + desc_bytes(desc), ctx->dir);
1669 @@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
1671 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1672 struct device *jrdev = ctx->jrdev;
1673 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1674 struct crypto_authenc_keys keys;
1677 @@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
1678 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1682 + * If DKP is supported, use it in the shared descriptor to generate
1685 + if (ctrlpriv->era >= 6) {
1686 + ctx->adata.keylen = keys.authkeylen;
1687 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1688 + OP_ALG_ALGSEL_MASK);
1690 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1693 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1694 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1696 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1697 + ctx->adata.keylen_pad +
1698 + keys.enckeylen, ctx->dir);
1699 + goto skip_split_key;
1702 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1703 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1705 @@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
1706 /* postpend encryption key to auth split key */
1707 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1708 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1709 - keys.enckeylen, DMA_TO_DEVICE);
1710 + keys.enckeylen, ctx->dir);
1712 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1713 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1714 ctx->adata.keylen_pad + keys.enckeylen, 1);
1718 ctx->cdata.keylen = keys.enckeylen;
1719 return aead_set_sh_desc(aead);
1721 @@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
1724 memcpy(ctx->key, key, keylen);
1725 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1726 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1727 ctx->cdata.keylen = keylen;
1729 return gcm_set_sh_desc(aead);
1730 @@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
1732 ctx->cdata.keylen = keylen - 4;
1733 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1736 return rfc4106_set_sh_desc(aead);
1739 @@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
1741 ctx->cdata.keylen = keylen - 4;
1742 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1745 return rfc4543_set_sh_desc(aead);
1748 @@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
1749 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1751 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1752 - desc_bytes(desc), DMA_TO_DEVICE);
1753 + desc_bytes(desc), ctx->dir);
1755 /* ablkcipher_decrypt shared descriptor */
1756 desc = ctx->sh_desc_dec;
1757 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1759 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1760 - desc_bytes(desc), DMA_TO_DEVICE);
1761 + desc_bytes(desc), ctx->dir);
1763 /* ablkcipher_givencrypt shared descriptor */
1764 desc = ctx->sh_desc_givenc;
1765 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1767 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1768 - desc_bytes(desc), DMA_TO_DEVICE);
1769 + desc_bytes(desc), ctx->dir);
1773 @@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
1774 desc = ctx->sh_desc_enc;
1775 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1776 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1777 - desc_bytes(desc), DMA_TO_DEVICE);
1778 + desc_bytes(desc), ctx->dir);
1780 /* xts_ablkcipher_decrypt shared descriptor */
1781 desc = ctx->sh_desc_dec;
1782 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1783 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1784 - desc_bytes(desc), DMA_TO_DEVICE);
1785 + desc_bytes(desc), ctx->dir);
1789 @@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
1790 append_seq_out_ptr(desc, dst_dma,
1791 req->assoclen + req->cryptlen - authsize,
1794 - /* REG3 = assoclen */
1795 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1798 static void init_gcm_job(struct aead_request *req,
1799 @@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
1802 init_aead_job(req, edesc, all_contig, encrypt);
1803 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1805 /* BUG This should not be specific to generic GCM. */
1807 @@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
1808 struct caam_aead_alg, aead);
1809 unsigned int ivsize = crypto_aead_ivsize(aead);
1810 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1811 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1812 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1813 OP_ALG_AAI_CTR_MOD128);
1814 const bool is_rfc3686 = alg->caam.rfc3686;
1815 @@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
1817 init_aead_job(req, edesc, all_contig, encrypt);
1820 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1821 + * having DPOVRD as destination.
1823 + if (ctrlpriv->era < 3)
1824 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1826 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1828 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
1829 append_load_as_imm(desc, req->iv, ivsize,
1831 @@ -3204,9 +3248,11 @@ struct caam_crypto_alg {
1832 struct caam_alg_entry caam;
1835 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
1836 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1839 dma_addr_t dma_addr;
1840 + struct caam_drv_private *priv;
1842 ctx->jrdev = caam_jr_alloc();
1843 if (IS_ERR(ctx->jrdev)) {
1844 @@ -3214,10 +3260,16 @@ static int caam_init_common(struct caam_
1845 return PTR_ERR(ctx->jrdev);
1848 + priv = dev_get_drvdata(ctx->jrdev->parent);
1849 + if (priv->era >= 6 && uses_dkp)
1850 + ctx->dir = DMA_BIDIRECTIONAL;
1852 + ctx->dir = DMA_TO_DEVICE;
1854 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
1855 offsetof(struct caam_ctx,
1857 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1858 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1859 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1860 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
1861 caam_jr_free(ctx->jrdev);
1862 @@ -3245,7 +3297,7 @@ static int caam_cra_init(struct crypto_t
1863 container_of(alg, struct caam_crypto_alg, crypto_alg);
1864 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1866 - return caam_init_common(ctx, &caam_alg->caam);
1867 + return caam_init_common(ctx, &caam_alg->caam, false);
1870 static int caam_aead_init(struct crypto_aead *tfm)
1871 @@ -3255,14 +3307,15 @@ static int caam_aead_init(struct crypto_
1872 container_of(alg, struct caam_aead_alg, aead);
1873 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
1875 - return caam_init_common(ctx, &caam_alg->caam);
1876 + return caam_init_common(ctx, &caam_alg->caam,
1877 + alg->setkey == aead_setkey);
1880 static void caam_exit_common(struct caam_ctx *ctx)
1882 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
1883 offsetof(struct caam_ctx, sh_desc_enc_dma),
1884 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1885 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1886 caam_jr_free(ctx->jrdev);
1889 --- a/drivers/crypto/caam/caamalg_desc.c
1890 +++ b/drivers/crypto/caam/caamalg_desc.c
1891 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
1892 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
1893 * (non-protocol) with no (null) encryption.
1894 * @desc: pointer to buffer used for descriptor construction
1895 - * @adata: pointer to authentication transform definitions. Note that since a
1896 - * split key is to be used, the size of the split key itself is
1897 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
1898 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
1899 + * @adata: pointer to authentication transform definitions.
1900 + * A split key is required for SEC Era < 6; the size of the split key
1901 + * is specified in this case. Valid algorithm values - one of
1902 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
1903 + * with OP_ALG_AAI_HMAC_PRECOMP.
1904 * @icvsize: integrity check value (ICV) size (truncated or full)
1906 - * Note: Requires an MDHA split key.
1909 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
1910 - unsigned int icvsize)
1911 + unsigned int icvsize, int era)
1913 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
1915 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
1916 /* Skip if already shared */
1917 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1919 - if (adata->key_inline)
1920 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
1921 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
1924 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
1925 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1927 + if (adata->key_inline)
1928 + append_key_as_imm(desc, adata->key_virt,
1929 + adata->keylen_pad, adata->keylen,
1930 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
1933 + append_key(desc, adata->key_dma, adata->keylen,
1934 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
1936 + append_proto_dkp(desc, adata);
1938 set_jump_tgt_here(desc, key_jump_cmd);
1940 /* assoclen + cryptlen = seqinlen */
1941 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
1942 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
1943 * (non-protocol) with no (null) decryption.
1944 * @desc: pointer to buffer used for descriptor construction
1945 - * @adata: pointer to authentication transform definitions. Note that since a
1946 - * split key is to be used, the size of the split key itself is
1947 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
1948 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
1949 + * @adata: pointer to authentication transform definitions.
1950 + * A split key is required for SEC Era < 6; the size of the split key
1951 + * is specified in this case. Valid algorithm values - one of
1952 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
1953 + * with OP_ALG_AAI_HMAC_PRECOMP.
1954 * @icvsize: integrity check value (ICV) size (truncated or full)
1956 - * Note: Requires an MDHA split key.
1959 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
1960 - unsigned int icvsize)
1961 + unsigned int icvsize, int era)
1963 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
1965 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
1966 /* Skip if already shared */
1967 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1969 - if (adata->key_inline)
1970 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
1971 - adata->keylen, CLASS_2 |
1972 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1974 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
1975 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1977 + if (adata->key_inline)
1978 + append_key_as_imm(desc, adata->key_virt,
1979 + adata->keylen_pad, adata->keylen,
1980 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
1983 + append_key(desc, adata->key_dma, adata->keylen,
1984 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
1986 + append_proto_dkp(desc, adata);
1988 set_jump_tgt_here(desc, key_jump_cmd);
1990 /* Class 2 operation */
1991 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
1992 static void init_sh_desc_key_aead(u32 * const desc,
1993 struct alginfo * const cdata,
1994 struct alginfo * const adata,
1995 - const bool is_rfc3686, u32 *nonce)
1996 + const bool is_rfc3686, u32 *nonce, int era)
1999 unsigned int enckeylen = cdata->keylen;
2000 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2002 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2004 - if (adata->key_inline)
2005 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2006 - adata->keylen, CLASS_2 |
2007 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2009 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2010 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2012 + if (adata->key_inline)
2013 + append_key_as_imm(desc, adata->key_virt,
2014 + adata->keylen_pad, adata->keylen,
2015 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2018 + append_key(desc, adata->key_dma, adata->keylen,
2019 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2021 + append_proto_dkp(desc, adata);
2024 if (cdata->key_inline)
2025 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2026 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2027 * @cdata: pointer to block cipher transform definitions
2028 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2029 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2030 - * @adata: pointer to authentication transform definitions. Note that since a
2031 - * split key is to be used, the size of the split key itself is
2032 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2033 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2034 + * @adata: pointer to authentication transform definitions.
2035 + * A split key is required for SEC Era < 6; the size of the split key
2036 + * is specified in this case. Valid algorithm values - one of
2037 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2038 + * with OP_ALG_AAI_HMAC_PRECOMP.
2039 * @ivsize: initialization vector size
2040 * @icvsize: integrity check value (ICV) size (truncated or full)
2041 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2042 * @nonce: pointer to rfc3686 nonce
2043 * @ctx1_iv_off: IV offset in CONTEXT1 register
2044 * @is_qi: true when called from caam/qi
2046 - * Note: Requires an MDHA split key.
2049 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2050 struct alginfo *adata, unsigned int ivsize,
2051 unsigned int icvsize, const bool is_rfc3686,
2052 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2053 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2056 /* Note: Context registers are saved. */
2057 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2058 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2060 /* Class 2 operation */
2061 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2062 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2065 /* Read and write assoclen bytes */
2066 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2067 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2068 + if (is_qi || era < 3) {
2069 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2070 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2072 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2073 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2076 /* Skip assoc data */
2077 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2078 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2079 * @cdata: pointer to block cipher transform definitions
2080 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2081 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2082 - * @adata: pointer to authentication transform definitions. Note that since a
2083 - * split key is to be used, the size of the split key itself is
2084 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2085 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2086 + * @adata: pointer to authentication transform definitions.
2087 + * A split key is required for SEC Era < 6; the size of the split key
2088 + * is specified in this case. Valid algorithm values - one of
2089 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2090 + * with OP_ALG_AAI_HMAC_PRECOMP.
2091 * @ivsize: initialization vector size
2092 * @icvsize: integrity check value (ICV) size (truncated or full)
2093 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2094 * @nonce: pointer to rfc3686 nonce
2095 * @ctx1_iv_off: IV offset in CONTEXT1 register
2096 * @is_qi: true when called from caam/qi
2098 - * Note: Requires an MDHA split key.
2101 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2102 struct alginfo *adata, unsigned int ivsize,
2103 unsigned int icvsize, const bool geniv,
2104 const bool is_rfc3686, u32 *nonce,
2105 - const u32 ctx1_iv_off, const bool is_qi)
2106 + const u32 ctx1_iv_off, const bool is_qi, int era)
2108 /* Note: Context registers are saved. */
2109 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2110 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2112 /* Class 2 operation */
2113 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2114 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2117 /* Read and write assoclen bytes */
2118 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2120 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2122 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2123 + if (is_qi || era < 3) {
2124 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2126 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2129 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2132 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2134 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2137 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2141 /* Skip assoc data */
2142 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2143 @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2144 * @cdata: pointer to block cipher transform definitions
2145 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2146 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2147 - * @adata: pointer to authentication transform definitions. Note that since a
2148 - * split key is to be used, the size of the split key itself is
2149 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2150 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2151 + * @adata: pointer to authentication transform definitions.
2152 + * A split key is required for SEC Era < 6; the size of the split key
2153 + * is specified in this case. Valid algorithm values - one of
2154 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2155 + * with OP_ALG_AAI_HMAC_PRECOMP.
2156 * @ivsize: initialization vector size
2157 * @icvsize: integrity check value (ICV) size (truncated or full)
2158 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2159 * @nonce: pointer to rfc3686 nonce
2160 * @ctx1_iv_off: IV offset in CONTEXT1 register
2161 * @is_qi: true when called from caam/qi
2163 - * Note: Requires an MDHA split key.
2166 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2167 struct alginfo *adata, unsigned int ivsize,
2168 unsigned int icvsize, const bool is_rfc3686,
2169 u32 *nonce, const u32 ctx1_iv_off,
2171 + const bool is_qi, int era)
2175 /* Note: Context registers are saved. */
2176 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2177 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2181 @@ -528,8 +561,13 @@ copy_iv:
2184 /* Read and write assoclen bytes */
2185 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2186 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2187 + if (is_qi || era < 3) {
2188 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2189 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2191 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2192 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2195 /* Skip assoc data */
2196 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2197 @@ -583,14 +621,431 @@ copy_iv:
2198 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2201 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2202 + * @desc: pointer to buffer used for descriptor construction
2203 + * @cdata: pointer to block cipher transform definitions
2204 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2205 + * with OP_ALG_AAI_CBC
2206 + * @adata: pointer to authentication transform definitions.
2207 + * A split key is required for SEC Era < 6; the size of the split key
2208 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2209 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2210 + * @assoclen: associated data length
2211 + * @ivsize: initialization vector size
2212 + * @authsize: authentication data size
2213 + * @blocksize: block cipher size
2216 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2217 + struct alginfo *adata, unsigned int assoclen,
2218 + unsigned int ivsize, unsigned int authsize,
2219 + unsigned int blocksize, int era)
2221 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2222 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2225 + * Compute the index (in bytes) for the LOAD with destination of
2226 + * Class 1 Data Size Register and for the LOAD that generates padding
2228 + if (adata->key_inline) {
2229 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2230 + cdata->keylen - 4 * CAAM_CMD_SZ;
2231 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2232 + cdata->keylen - 2 * CAAM_CMD_SZ;
2234 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2236 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2240 + stidx = 1 << HDR_START_IDX_SHIFT;
2241 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2243 + /* skip key loading if they are loaded due to sharing */
2244 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2248 + if (adata->key_inline)
2249 + append_key_as_imm(desc, adata->key_virt,
2250 + adata->keylen_pad, adata->keylen,
2251 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2254 + append_key(desc, adata->key_dma, adata->keylen,
2255 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2257 + append_proto_dkp(desc, adata);
2260 + if (cdata->key_inline)
2261 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2262 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2264 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2265 + KEY_DEST_CLASS_REG);
2267 + set_jump_tgt_here(desc, key_jump_cmd);
2269 + /* class 2 operation */
2270 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2272 + /* class 1 operation */
2273 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2276 + /* payloadlen = input data length - (assoclen + ivlen) */
2277 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2279 + /* math1 = payloadlen + icvlen */
2280 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2282 + /* padlen = block_size - math1 % block_size */
2283 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2284 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2286 + /* cryptlen = payloadlen + icvlen + padlen */
2287 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2290 + * update immediate data with the padding length value
2291 + * for the LOAD in the class 1 data size register.
2293 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2294 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2295 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2296 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2298 + /* overwrite PL field for the padding info FIFO entry */
2299 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2300 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2301 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2302 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2304 + /* store encrypted payload, icv and padding */
2305 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2307 + /* if payload length is zero, jump to zero-payload commands */
2308 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2309 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2310 + JUMP_COND_MATH_Z);
2312 + /* load iv in context1 */
2313 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2314 + LDST_CLASS_1_CCB | ivsize);
2316 + /* read assoc for authentication */
2317 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2319 + /* insnoop payload */
2320 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2321 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2323 + /* jump the zero-payload commands */
2324 + append_jump(desc, JUMP_TEST_ALL | 3);
2326 + /* zero-payload commands */
2327 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2329 + /* load iv in context1 */
2330 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2331 + LDST_CLASS_1_CCB | ivsize);
2333 + /* assoc data is the only data for authentication */
2334 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2335 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2337 + /* send icv to encryption */
2338 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2341 + /* update class 1 data size register with padding length */
2342 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2343 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2345 + /* generate padding and send it to encryption */
2346 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2347 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2348 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2349 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2352 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2353 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2354 + desc_bytes(desc), 1);
2357 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2360 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2361 + * @desc: pointer to buffer used for descriptor construction
2362 + * @cdata: pointer to block cipher transform definitions
2363 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2364 + * with OP_ALG_AAI_CBC
2365 + * @adata: pointer to authentication transform definitions.
2366 + * A split key is required for SEC Era < 6; the size of the split key
2367 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2368 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2369 + * @assoclen: associated data length
2370 + * @ivsize: initialization vector size
2371 + * @authsize: authentication data size
2372 + * @blocksize: block cipher size
2375 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2376 + struct alginfo *adata, unsigned int assoclen,
2377 + unsigned int ivsize, unsigned int authsize,
2378 + unsigned int blocksize, int era)
2380 + u32 stidx, jumpback;
2381 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2383 + * Pointer Size bool determines the size of address pointers.
2384 + * false - Pointers fit in one 32-bit word.
2385 + * true - Pointers fit in two 32-bit words.
2387 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2389 + stidx = 1 << HDR_START_IDX_SHIFT;
2390 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2392 + /* skip key loading if they are loaded due to sharing */
2393 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2397 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2398 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2400 + append_proto_dkp(desc, adata);
2402 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2403 + KEY_DEST_CLASS_REG);
2405 + set_jump_tgt_here(desc, key_jump_cmd);
2407 + /* class 2 operation */
2408 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2409 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2410 + /* class 1 operation */
2411 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2414 + /* VSIL = input data length - 2 * block_size */
2415 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2419 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2422 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2424 + /* skip data to the last but one cipher block */
2425 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2427 + /* load iv for the last cipher block */
2428 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2429 + LDST_CLASS_1_CCB | ivsize);
2431 + /* read last cipher block */
2432 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2433 + FIFOLD_TYPE_LAST1 | blocksize);
2435 + /* move decrypted block into math0 and math1 */
2436 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2439 + /* reset AES CHA */
2440 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2441 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2443 + /* rewind input sequence */
2444 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2446 + /* key1 is in decryption form */
2447 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2448 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2450 + /* load iv in context1 */
2451 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2452 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2454 + /* read sequence number */
2455 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2456 + /* load Type, Version and Len fields in math0 */
2457 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2458 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2460 + /* compute (padlen - 1) */
2461 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2463 + /* math2 = icvlen + (padlen - 1) + 1 */
2464 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2466 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2468 + /* VSOL = payloadlen + icvlen + padlen */
2469 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2471 + if (caam_little_end)
2472 + append_moveb(desc, MOVE_WAITCOMP |
2473 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2475 + /* update Len field */
2476 + append_math_sub(desc, REG0, REG0, REG2, 8);
2478 + /* store decrypted payload, icv and padding */
2479 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2481 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
2482 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2484 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2485 + JUMP_COND_MATH_Z);
2487 + /* send Type, Version and Len(pre ICV) fields to authentication */
2488 + append_move(desc, MOVE_WAITCOMP |
2489 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2490 + (3 << MOVE_OFFSET_SHIFT) | 5);
2492 + /* outsnooping payload */
2493 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2494 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2496 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2498 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2499 + /* send Type, Version and Len(pre ICV) fields to authentication */
2500 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
2501 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2502 + (3 << MOVE_OFFSET_SHIFT) | 5);
2504 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
2505 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
2507 + /* load icvlen and padlen */
2508 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2509 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
2511 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
2512 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2515 + * Start a new input sequence using the SEQ OUT PTR command options,
2516 + * pointer and length used when the current output sequence was defined.
2520 + * Move the lower 32 bits of Shared Descriptor address, the
2521 + * SEQ OUT PTR command, Output Pointer (2 words) and
2522 + * Output Length into math registers.
2524 + if (caam_little_end)
2525 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2527 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
2529 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2531 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
2533 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
2534 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
2535 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
2536 + /* Append a JUMP command after the copied fields */
2537 + jumpback = CMD_JUMP | (char)-9;
2538 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
2539 + LDST_SRCDST_WORD_DECO_MATH2 |
2540 + (4 << LDST_OFFSET_SHIFT));
2541 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2542 + /* Move the updated fields back to the Job Descriptor */
2543 + if (caam_little_end)
2544 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2545 + MOVE_DEST_DESCBUF |
2546 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
2548 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2549 + MOVE_DEST_DESCBUF |
2550 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
2553 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
2554 + * and then jump back to the next command from the
2555 + * Shared Descriptor.
2557 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
2560 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
2561 + * Output Length into math registers.
2563 + if (caam_little_end)
2564 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2566 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
2568 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2570 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
2572 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
2573 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
2574 + ~(((u64)(CMD_SEQ_IN_PTR ^
2575 + CMD_SEQ_OUT_PTR)) << 32));
2576 + /* Append a JUMP command after the copied fields */
2577 + jumpback = CMD_JUMP | (char)-7;
2578 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
2579 + LDST_SRCDST_WORD_DECO_MATH1 |
2580 + (4 << LDST_OFFSET_SHIFT));
2581 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2582 + /* Move the updated fields back to the Job Descriptor */
2583 + if (caam_little_end)
2584 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2585 + MOVE_DEST_DESCBUF |
2586 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
2588 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2589 + MOVE_DEST_DESCBUF |
2590 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
2593 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
2594 + * and then jump back to the next command from the
2595 + * Shared Descriptor.
2597 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
2600 + /* skip payload */
2601 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
2603 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
2604 + FIFOLD_TYPE_LAST2 | authsize);
2607 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
2608 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2609 + desc_bytes(desc), 1);
2612 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
2615 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
2616 * @desc: pointer to buffer used for descriptor construction
2617 * @cdata: pointer to block cipher transform definitions
2618 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2619 + * @ivsize: initialization vector size
2620 * @icvsize: integrity check value (ICV) size (truncated or full)
2621 + * @is_qi: true when called from caam/qi
2623 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
2624 - unsigned int icvsize)
2625 + unsigned int ivsize, unsigned int icvsize,
2628 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
2629 *zero_assoc_jump_cmd2;
2630 @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2631 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2635 + u32 *wait_load_cmd;
2637 + /* REG3 = assoclen */
2638 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2639 + LDST_SRCDST_WORD_DECO_MATH3 |
2640 + (4 << LDST_OFFSET_SHIFT));
2642 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2643 + JUMP_COND_CALM | JUMP_COND_NCP |
2644 + JUMP_COND_NOP | JUMP_COND_NIP |
2646 + set_jump_tgt_here(desc, wait_load_cmd);
2648 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
2651 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
2655 /* if assoclen + cryptlen is ZERO, skip to ICV write */
2656 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
2657 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
2661 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2662 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2664 /* if assoclen is ZERO, skip reading the assoc data */
2665 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2666 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
2667 @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2668 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
2669 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
2671 - /* jump the zero-payload commands */
2672 - append_jump(desc, JUMP_TEST_ALL | 2);
2673 + /* jump to ICV writing */
2675 + append_jump(desc, JUMP_TEST_ALL | 4);
2677 + append_jump(desc, JUMP_TEST_ALL | 2);
2679 /* zero-payload commands */
2680 set_jump_tgt_here(desc, zero_payload_jump_cmd);
2681 @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2682 /* read assoc data */
2683 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
2684 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
2686 + /* jump to ICV writing */
2687 + append_jump(desc, JUMP_TEST_ALL | 2);
2689 /* There is no input data */
2690 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
2693 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2694 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
2695 + FIFOLD_TYPE_LAST1);
2698 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
2699 LDST_SRCDST_BYTE_CONTEXT);
2700 @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
2701 * @desc: pointer to buffer used for descriptor construction
2702 * @cdata: pointer to block cipher transform definitions
2703 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2704 + * @ivsize: initialization vector size
2705 * @icvsize: integrity check value (ICV) size (truncated or full)
2706 + * @is_qi: true when called from caam/qi
2708 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
2709 - unsigned int icvsize)
2710 + unsigned int ivsize, unsigned int icvsize,
2713 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
2715 @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
2716 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2717 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2720 + u32 *wait_load_cmd;
2722 + /* REG3 = assoclen */
2723 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2724 + LDST_SRCDST_WORD_DECO_MATH3 |
2725 + (4 << LDST_OFFSET_SHIFT));
2727 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2728 + JUMP_COND_CALM | JUMP_COND_NCP |
2729 + JUMP_COND_NOP | JUMP_COND_NIP |
2731 + set_jump_tgt_here(desc, wait_load_cmd);
2733 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2734 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2737 /* if assoclen is ZERO, skip reading the assoc data */
2738 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2739 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
2740 @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
2741 * @desc: pointer to buffer used for descriptor construction
2742 * @cdata: pointer to block cipher transform definitions
2743 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2744 + * @ivsize: initialization vector size
2745 * @icvsize: integrity check value (ICV) size (truncated or full)
2746 + * @is_qi: true when called from caam/qi
2748 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
2749 - unsigned int icvsize)
2750 + unsigned int ivsize, unsigned int icvsize,
2755 @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
2756 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2759 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
2761 + u32 *wait_load_cmd;
2763 + /* REG3 = assoclen */
2764 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2765 + LDST_SRCDST_WORD_DECO_MATH3 |
2766 + (4 << LDST_OFFSET_SHIFT));
2768 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2769 + JUMP_COND_CALM | JUMP_COND_NCP |
2770 + JUMP_COND_NOP | JUMP_COND_NIP |
2772 + set_jump_tgt_here(desc, wait_load_cmd);
2774 + /* Read salt and IV */
2775 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2776 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2778 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2779 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2782 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
2783 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2785 /* Read assoc data */
2786 @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
2787 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
2790 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
2791 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
2793 /* Will read cryptlen bytes */
2794 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
2795 @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
2796 * @desc: pointer to buffer used for descriptor construction
2797 * @cdata: pointer to block cipher transform definitions
2798 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2799 + * @ivsize: initialization vector size
2800 * @icvsize: integrity check value (ICV) size (truncated or full)
2801 + * @is_qi: true when called from caam/qi
2803 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
2804 - unsigned int icvsize)
2805 + unsigned int ivsize, unsigned int icvsize,
2810 @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
2811 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2812 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2814 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
2816 + u32 *wait_load_cmd;
2818 + /* REG3 = assoclen */
2819 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2820 + LDST_SRCDST_WORD_DECO_MATH3 |
2821 + (4 << LDST_OFFSET_SHIFT));
2823 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2824 + JUMP_COND_CALM | JUMP_COND_NCP |
2825 + JUMP_COND_NOP | JUMP_COND_NIP |
2827 + set_jump_tgt_here(desc, wait_load_cmd);
2829 + /* Read salt and IV */
2830 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2831 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2833 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2834 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2837 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
2838 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2840 /* Read assoc data */
2841 @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
2842 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
2845 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
2846 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
2848 /* Will read cryptlen bytes */
2849 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
2850 @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
2851 * @desc: pointer to buffer used for descriptor construction
2852 * @cdata: pointer to block cipher transform definitions
2853 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2854 + * @ivsize: initialization vector size
2855 * @icvsize: integrity check value (ICV) size (truncated or full)
2856 + * @is_qi: true when called from caam/qi
2858 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
2859 - unsigned int icvsize)
2860 + unsigned int ivsize, unsigned int icvsize,
2863 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2865 @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
2866 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2870 + /* assoclen is not needed, skip it */
2871 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
2873 + /* Read salt and IV */
2874 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2875 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2877 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2878 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2881 /* assoclen + cryptlen = seqinlen */
2882 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
2884 @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
2885 * @desc: pointer to buffer used for descriptor construction
2886 * @cdata: pointer to block cipher transform definitions
2887 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2888 + * @ivsize: initialization vector size
2889 * @icvsize: integrity check value (ICV) size (truncated or full)
2890 + * @is_qi: true when called from caam/qi
2892 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
2893 - unsigned int icvsize)
2894 + unsigned int ivsize, unsigned int icvsize,
2897 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2899 @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
2900 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2901 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2904 + /* assoclen is not needed, skip it */
2905 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
2907 + /* Read salt and IV */
2908 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2909 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2911 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2912 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2915 /* assoclen + cryptlen = seqoutlen */
2916 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
2918 @@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
2920 /* Load nonce into CONTEXT1 reg */
2922 - u8 *nonce = cdata->key_virt + cdata->keylen;
2923 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2925 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2926 LDST_CLASS_IND_CCB |
2927 @@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
2929 /* Load nonce into CONTEXT1 reg */
2931 - u8 *nonce = cdata->key_virt + cdata->keylen;
2932 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2934 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2935 LDST_CLASS_IND_CCB |
2936 @@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
2938 /* Load Nonce into CONTEXT1 reg */
2940 - u8 *nonce = cdata->key_virt + cdata->keylen;
2941 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2943 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2944 LDST_CLASS_IND_CCB |
2945 --- a/drivers/crypto/caam/caamalg_desc.h
2946 +++ b/drivers/crypto/caam/caamalg_desc.h
2948 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
2949 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
2951 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
2952 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
2954 /* Note: Nonce is counted in cdata.keylen */
2955 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
2958 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
2959 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
2960 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
2961 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
2962 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
2964 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
2965 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
2966 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
2967 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
2968 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
2970 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
2971 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
2972 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
2973 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
2974 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
2976 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
2977 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
2981 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2982 - unsigned int icvsize);
2983 + unsigned int icvsize, int era);
2985 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2986 - unsigned int icvsize);
2987 + unsigned int icvsize, int era);
2989 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2990 struct alginfo *adata, unsigned int ivsize,
2991 unsigned int icvsize, const bool is_rfc3686,
2992 u32 *nonce, const u32 ctx1_iv_off,
2993 - const bool is_qi);
2994 + const bool is_qi, int era);
2996 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2997 struct alginfo *adata, unsigned int ivsize,
2998 unsigned int icvsize, const bool geniv,
2999 const bool is_rfc3686, u32 *nonce,
3000 - const u32 ctx1_iv_off, const bool is_qi);
3001 + const u32 ctx1_iv_off, const bool is_qi, int era);
3003 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3004 struct alginfo *adata, unsigned int ivsize,
3005 unsigned int icvsize, const bool is_rfc3686,
3006 u32 *nonce, const u32 ctx1_iv_off,
3007 - const bool is_qi);
3008 + const bool is_qi, int era);
3010 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3011 + struct alginfo *adata, unsigned int assoclen,
3012 + unsigned int ivsize, unsigned int authsize,
3013 + unsigned int blocksize, int era);
3015 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3016 + struct alginfo *adata, unsigned int assoclen,
3017 + unsigned int ivsize, unsigned int authsize,
3018 + unsigned int blocksize, int era);
3020 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3021 - unsigned int icvsize);
3022 + unsigned int ivsize, unsigned int icvsize,
3023 + const bool is_qi);
3025 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3026 - unsigned int icvsize);
3027 + unsigned int ivsize, unsigned int icvsize,
3028 + const bool is_qi);
3030 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3031 - unsigned int icvsize);
3032 + unsigned int ivsize, unsigned int icvsize,
3033 + const bool is_qi);
3035 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3036 - unsigned int icvsize);
3037 + unsigned int ivsize, unsigned int icvsize,
3038 + const bool is_qi);
3040 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3041 - unsigned int icvsize);
3042 + unsigned int ivsize, unsigned int icvsize,
3043 + const bool is_qi);
3045 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3046 - unsigned int icvsize);
3047 + unsigned int ivsize, unsigned int icvsize,
3048 + const bool is_qi);
3050 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3051 unsigned int ivsize, const bool is_rfc3686,
3052 --- a/drivers/crypto/caam/caamalg_qi.c
3053 +++ b/drivers/crypto/caam/caamalg_qi.c
3062 #include "desc_constr.h"
3063 @@ -53,6 +53,7 @@ struct caam_ctx {
3064 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3065 u8 key[CAAM_MAX_KEY_SIZE];
3067 + enum dma_data_direction dir;
3068 struct alginfo adata;
3069 struct alginfo cdata;
3070 unsigned int authsize;
3071 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3072 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3073 OP_ALG_AAI_CTR_MOD128);
3074 const bool is_rfc3686 = alg->caam.rfc3686;
3075 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3077 if (!ctx->cdata.keylen || !ctx->authsize)
3079 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3081 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3082 ivsize, ctx->authsize, is_rfc3686, nonce,
3083 - ctx1_iv_off, true);
3084 + ctx1_iv_off, true, ctrlpriv->era);
3087 /* aead_decrypt shared descriptor */
3088 @@ -149,7 +151,8 @@ skip_enc:
3090 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3091 ivsize, ctx->authsize, alg->caam.geniv,
3092 - is_rfc3686, nonce, ctx1_iv_off, true);
3093 + is_rfc3686, nonce, ctx1_iv_off, true,
3096 if (!alg->caam.geniv)
3098 @@ -176,7 +179,7 @@ skip_enc:
3100 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3101 ivsize, ctx->authsize, is_rfc3686, nonce,
3102 - ctx1_iv_off, true);
3103 + ctx1_iv_off, true, ctrlpriv->era);
3107 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3109 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3110 struct device *jrdev = ctx->jrdev;
3111 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3112 struct crypto_authenc_keys keys;
3115 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3116 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3120 + * If DKP is supported, use it in the shared descriptor to generate
3123 + if (ctrlpriv->era >= 6) {
3124 + ctx->adata.keylen = keys.authkeylen;
3125 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3126 + OP_ALG_ALGSEL_MASK);
3128 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3131 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3132 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3134 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3135 + ctx->adata.keylen_pad +
3136 + keys.enckeylen, ctx->dir);
3137 + goto skip_split_key;
3140 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3141 keys.authkeylen, CAAM_MAX_KEY_SIZE -
3143 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3144 /* postpend encryption key to auth split key */
3145 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3146 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3147 - keys.enckeylen, DMA_TO_DEVICE);
3148 + keys.enckeylen, ctx->dir);
3150 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3151 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3152 ctx->adata.keylen_pad + keys.enckeylen, 1);
3156 ctx->cdata.keylen = keys.enckeylen;
3158 ret = aead_set_sh_desc(aead);
3159 @@ -258,6 +284,468 @@ badkey:
3163 +static int tls_set_sh_desc(struct crypto_aead *tls)
3165 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3166 + unsigned int ivsize = crypto_aead_ivsize(tls);
3167 + unsigned int blocksize = crypto_aead_blocksize(tls);
3168 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
3169 + unsigned int data_len[2];
3171 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3173 + if (!ctx->cdata.keylen || !ctx->authsize)
3177 + * TLS 1.0 encrypt shared descriptor
3178 + * Job Descriptor and Shared Descriptor
3179 + * must fit into the 64-word Descriptor h/w Buffer
3181 + data_len[0] = ctx->adata.keylen_pad;
3182 + data_len[1] = ctx->cdata.keylen;
3184 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3185 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
3189 + ctx->adata.key_virt = ctx->key;
3191 + ctx->adata.key_dma = ctx->key_dma;
3194 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3196 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3198 + ctx->adata.key_inline = !!(inl_mask & 1);
3199 + ctx->cdata.key_inline = !!(inl_mask & 2);
3201 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3202 + assoclen, ivsize, ctx->authsize, blocksize,
3206 + * TLS 1.0 decrypt shared descriptor
3207 + * Keys do not fit inline, regardless of algorithms used
3209 + ctx->adata.key_inline = false;
3210 + ctx->adata.key_dma = ctx->key_dma;
3211 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3213 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3214 + assoclen, ivsize, ctx->authsize, blocksize,
3220 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3222 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3224 + ctx->authsize = authsize;
3225 + tls_set_sh_desc(tls);
3230 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3231 + unsigned int keylen)
3233 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3234 + struct device *jrdev = ctx->jrdev;
3235 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3236 + struct crypto_authenc_keys keys;
3239 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3243 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3244 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
3246 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3247 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3251 + * If DKP is supported, use it in the shared descriptor to generate
3254 + if (ctrlpriv->era >= 6) {
3255 + ctx->adata.keylen = keys.authkeylen;
3256 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3257 + OP_ALG_ALGSEL_MASK);
3259 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3262 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3263 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3265 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3266 + ctx->adata.keylen_pad +
3267 + keys.enckeylen, ctx->dir);
3268 + goto skip_split_key;
3271 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3272 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
3277 + /* postpend encryption key to auth split key */
3278 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3279 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3280 + keys.enckeylen, ctx->dir);
3283 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
3284 + ctx->adata.keylen, ctx->adata.keylen_pad);
3285 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3286 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3287 + ctx->adata.keylen_pad + keys.enckeylen, 1);
3291 + ctx->cdata.keylen = keys.enckeylen;
3293 + ret = tls_set_sh_desc(tls);
3297 + /* Now update the driver contexts with the new shared descriptor */
3298 + if (ctx->drv_ctx[ENCRYPT]) {
3299 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3300 + ctx->sh_desc_enc);
3302 + dev_err(jrdev, "driver enc context update failed\n");
3307 + if (ctx->drv_ctx[DECRYPT]) {
3308 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3309 + ctx->sh_desc_dec);
3311 + dev_err(jrdev, "driver dec context update failed\n");
3318 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
3322 +static int gcm_set_sh_desc(struct crypto_aead *aead)
3324 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3325 + unsigned int ivsize = crypto_aead_ivsize(aead);
3326 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
3327 + ctx->cdata.keylen;
3329 + if (!ctx->cdata.keylen || !ctx->authsize)
3333 + * Job Descriptor and Shared Descriptor
3334 + * must fit into the 64-word Descriptor h/w Buffer
3336 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
3337 + ctx->cdata.key_inline = true;
3338 + ctx->cdata.key_virt = ctx->key;
3340 + ctx->cdata.key_inline = false;
3341 + ctx->cdata.key_dma = ctx->key_dma;
3344 + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
3345 + ctx->authsize, true);
3348 + * Job Descriptor and Shared Descriptor
3349 + * must fit into the 64-word Descriptor h/w Buffer
3351 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
3352 + ctx->cdata.key_inline = true;
3353 + ctx->cdata.key_virt = ctx->key;
3355 + ctx->cdata.key_inline = false;
3356 + ctx->cdata.key_dma = ctx->key_dma;
3359 + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
3360 + ctx->authsize, true);
3365 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
3367 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
3369 + ctx->authsize = authsize;
3370 + gcm_set_sh_desc(authenc);
3375 +static int gcm_setkey(struct crypto_aead *aead,
3376 + const u8 *key, unsigned int keylen)
3378 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3379 + struct device *jrdev = ctx->jrdev;
3383 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3384 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3387 + memcpy(ctx->key, key, keylen);
3388 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
3389 + ctx->cdata.keylen = keylen;
3391 + ret = gcm_set_sh_desc(aead);
3395 + /* Now update the driver contexts with the new shared descriptor */
3396 + if (ctx->drv_ctx[ENCRYPT]) {
3397 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3398 + ctx->sh_desc_enc);
3400 + dev_err(jrdev, "driver enc context update failed\n");
3405 + if (ctx->drv_ctx[DECRYPT]) {
3406 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3407 + ctx->sh_desc_dec);
3409 + dev_err(jrdev, "driver dec context update failed\n");
3417 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
3419 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3420 + unsigned int ivsize = crypto_aead_ivsize(aead);
3421 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
3422 + ctx->cdata.keylen;
3424 + if (!ctx->cdata.keylen || !ctx->authsize)
3427 + ctx->cdata.key_virt = ctx->key;
3430 + * Job Descriptor and Shared Descriptor
3431 + * must fit into the 64-word Descriptor h/w Buffer
3433 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
3434 + ctx->cdata.key_inline = true;
3436 + ctx->cdata.key_inline = false;
3437 + ctx->cdata.key_dma = ctx->key_dma;
3440 + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
3441 + ctx->authsize, true);
3444 + * Job Descriptor and Shared Descriptor
3445 + * must fit into the 64-word Descriptor h/w Buffer
3447 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
3448 + ctx->cdata.key_inline = true;
3450 + ctx->cdata.key_inline = false;
3451 + ctx->cdata.key_dma = ctx->key_dma;
3454 + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
3455 + ctx->authsize, true);
3460 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
3461 + unsigned int authsize)
3463 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
3465 + ctx->authsize = authsize;
3466 + rfc4106_set_sh_desc(authenc);
3471 +static int rfc4106_setkey(struct crypto_aead *aead,
3472 + const u8 *key, unsigned int keylen)
3474 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3475 + struct device *jrdev = ctx->jrdev;
3482 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3483 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3486 + memcpy(ctx->key, key, keylen);
3488 + * The last four bytes of the key material are used as the salt value
3489 + * in the nonce. Update the AES key length.
3491 + ctx->cdata.keylen = keylen - 4;
3492 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
3495 + ret = rfc4106_set_sh_desc(aead);
3499 + /* Now update the driver contexts with the new shared descriptor */
3500 + if (ctx->drv_ctx[ENCRYPT]) {
3501 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3502 + ctx->sh_desc_enc);
3504 + dev_err(jrdev, "driver enc context update failed\n");
3509 + if (ctx->drv_ctx[DECRYPT]) {
3510 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3511 + ctx->sh_desc_dec);
3513 + dev_err(jrdev, "driver dec context update failed\n");
3521 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
3523 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3524 + unsigned int ivsize = crypto_aead_ivsize(aead);
3525 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
3526 + ctx->cdata.keylen;
3528 + if (!ctx->cdata.keylen || !ctx->authsize)
3531 + ctx->cdata.key_virt = ctx->key;
3534 + * Job Descriptor and Shared Descriptor
3535 + * must fit into the 64-word Descriptor h/w Buffer
3537 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
3538 + ctx->cdata.key_inline = true;
3540 + ctx->cdata.key_inline = false;
3541 + ctx->cdata.key_dma = ctx->key_dma;
3544 + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
3545 + ctx->authsize, true);
3548 + * Job Descriptor and Shared Descriptor
3549 + * must fit into the 64-word Descriptor h/w Buffer
3551 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
3552 + ctx->cdata.key_inline = true;
3554 + ctx->cdata.key_inline = false;
3555 + ctx->cdata.key_dma = ctx->key_dma;
3558 + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
3559 + ctx->authsize, true);
3564 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
3565 + unsigned int authsize)
3567 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
3569 + ctx->authsize = authsize;
3570 + rfc4543_set_sh_desc(authenc);
3575 +static int rfc4543_setkey(struct crypto_aead *aead,
3576 + const u8 *key, unsigned int keylen)
3578 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3579 + struct device *jrdev = ctx->jrdev;
3586 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3587 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3590 + memcpy(ctx->key, key, keylen);
3592 + * The last four bytes of the key material are used as the salt value
3593 + * in the nonce. Update the AES key length.
3595 + ctx->cdata.keylen = keylen - 4;
3596 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
3599 + ret = rfc4543_set_sh_desc(aead);
3603 + /* Now update the driver contexts with the new shared descriptor */
3604 + if (ctx->drv_ctx[ENCRYPT]) {
3605 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3606 + ctx->sh_desc_enc);
3608 + dev_err(jrdev, "driver enc context update failed\n");
3613 + if (ctx->drv_ctx[DECRYPT]) {
3614 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3615 + ctx->sh_desc_dec);
3617 + dev_err(jrdev, "driver dec context update failed\n");
3625 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
3626 const u8 *key, unsigned int keylen)
3628 @@ -414,6 +902,29 @@ struct aead_edesc {
3632 + * tls_edesc - s/w-extended tls descriptor
3633 + * @src_nents: number of segments in input scatterlist
3634 + * @dst_nents: number of segments in output scatterlist
3635 + * @iv_dma: dma address of iv for checking continuity and link table
3636 + * @qm_sg_bytes: length of dma mapped h/w link table
3637 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
3638 + * @qm_sg_dma: bus physical mapped address of h/w link table
3639 + * @drv_req: driver-specific request structure
3640 + * @sgt: the h/w link table, followed by IV
3645 + dma_addr_t iv_dma;
3647 + dma_addr_t qm_sg_dma;
3648 + struct scatterlist tmp[2];
3649 + struct scatterlist *dst;
3650 + struct caam_drv_req drv_req;
3651 + struct qm_sg_entry sgt[0];
3655 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
3656 * @src_nents: number of segments in input scatterlist
3657 * @dst_nents: number of segments in output scatterlist
3658 @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
3659 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
3662 +static void tls_unmap(struct device *dev,
3663 + struct tls_edesc *edesc,
3664 + struct aead_request *req)
3666 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
3667 + int ivsize = crypto_aead_ivsize(aead);
3669 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
3670 + edesc->dst_nents, edesc->iv_dma, ivsize,
3671 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
3672 + edesc->qm_sg_bytes);
3675 static void ablkcipher_unmap(struct device *dev,
3676 struct ablkcipher_edesc *edesc,
3677 struct ablkcipher_request *req)
3678 @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
3679 qidev = caam_ctx->qidev;
3681 if (unlikely(status)) {
3682 + u32 ssrc = status & JRSTA_SSRC_MASK;
3683 + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
3685 caam_jr_strstatus(qidev, status);
3688 + * verify hw auth check passed else return -EBADMSG
3690 + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
3691 + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
3697 edesc = container_of(drv_req, typeof(*edesc), drv_req);
3698 @@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
3699 return aead_crypt(req, false);
3702 +static int ipsec_gcm_encrypt(struct aead_request *req)
3704 + if (req->assoclen < 8)
3707 + return aead_crypt(req, true);
3710 +static int ipsec_gcm_decrypt(struct aead_request *req)
3712 + if (req->assoclen < 8)
3715 + return aead_crypt(req, false);
3718 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
3720 + struct device *qidev;
3721 + struct tls_edesc *edesc;
3722 + struct aead_request *aead_req = drv_req->app_ctx;
3723 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
3724 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
3727 + qidev = caam_ctx->qidev;
3729 + if (unlikely(status)) {
3730 + caam_jr_strstatus(qidev, status);
3734 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
3735 + tls_unmap(qidev, edesc, aead_req);
3737 + aead_request_complete(aead_req, ecode);
3738 + qi_cache_free(edesc);
3742 + * allocate and map the tls extended descriptor
3744 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
3746 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
3747 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3748 + unsigned int blocksize = crypto_aead_blocksize(aead);
3749 + unsigned int padsize, authsize;
3750 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
3751 + typeof(*alg), aead);
3752 + struct device *qidev = ctx->qidev;
3753 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3754 + GFP_KERNEL : GFP_ATOMIC;
3755 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
3756 + struct tls_edesc *edesc;
3757 + dma_addr_t qm_sg_dma, iv_dma = 0;
3760 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
3761 + int in_len, out_len;
3762 + struct qm_sg_entry *sg_table, *fd_sgt;
3763 + struct caam_drv_ctx *drv_ctx;
3764 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
3765 + struct scatterlist *dst;
3768 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
3770 + authsize = ctx->authsize + padsize;
3772 + authsize = ctx->authsize;
3775 + drv_ctx = get_drv_ctx(ctx, op_type);
3776 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
3777 + return (struct tls_edesc *)drv_ctx;
3779 + /* allocate space for base edesc, link tables and IV */
3780 + edesc = qi_cache_alloc(GFP_DMA | flags);
3781 + if (unlikely(!edesc)) {
3782 + dev_err(qidev, "could not allocate extended descriptor\n");
3783 + return ERR_PTR(-ENOMEM);
3786 + if (likely(req->src == req->dst)) {
3787 + src_nents = sg_nents_for_len(req->src, req->assoclen +
3789 + (encrypt ? authsize : 0));
3790 + if (unlikely(src_nents < 0)) {
3791 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
3792 + req->assoclen + req->cryptlen +
3793 + (encrypt ? authsize : 0));
3794 + qi_cache_free(edesc);
3795 + return ERR_PTR(src_nents);
3798 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
3799 + DMA_BIDIRECTIONAL);
3800 + if (unlikely(!mapped_src_nents)) {
3801 + dev_err(qidev, "unable to map source\n");
3802 + qi_cache_free(edesc);
3803 + return ERR_PTR(-ENOMEM);
3807 + src_nents = sg_nents_for_len(req->src, req->assoclen +
3809 + if (unlikely(src_nents < 0)) {
3810 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
3811 + req->assoclen + req->cryptlen);
3812 + qi_cache_free(edesc);
3813 + return ERR_PTR(src_nents);
3816 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
3817 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
3818 + (encrypt ? authsize : 0));
3819 + if (unlikely(dst_nents < 0)) {
3820 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
3822 + (encrypt ? authsize : 0));
3823 + qi_cache_free(edesc);
3824 + return ERR_PTR(dst_nents);
3828 + mapped_src_nents = dma_map_sg(qidev, req->src,
3829 + src_nents, DMA_TO_DEVICE);
3830 + if (unlikely(!mapped_src_nents)) {
3831 + dev_err(qidev, "unable to map source\n");
3832 + qi_cache_free(edesc);
3833 + return ERR_PTR(-ENOMEM);
3836 + mapped_src_nents = 0;
3839 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
3841 + if (unlikely(!mapped_dst_nents)) {
3842 + dev_err(qidev, "unable to map destination\n");
3843 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
3844 + qi_cache_free(edesc);
3845 + return ERR_PTR(-ENOMEM);
3850 + * Create S/G table: IV, src, dst.
3851 + * Input is not contiguous.
3853 + qm_sg_ents = 1 + mapped_src_nents +
3854 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
3855 + sg_table = &edesc->sgt[0];
3856 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
3858 + ivsize = crypto_aead_ivsize(aead);
3859 + iv = (u8 *)(sg_table + qm_sg_ents);
3860 + /* Make sure IV is located in a DMAable area */
3861 + memcpy(iv, req->iv, ivsize);
3862 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
3863 + if (dma_mapping_error(qidev, iv_dma)) {
3864 + dev_err(qidev, "unable to map IV\n");
3865 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
3867 + qi_cache_free(edesc);
3868 + return ERR_PTR(-ENOMEM);
3871 + edesc->src_nents = src_nents;
3872 + edesc->dst_nents = dst_nents;
3874 + edesc->iv_dma = iv_dma;
3875 + edesc->drv_req.app_ctx = req;
3876 + edesc->drv_req.cbk = tls_done;
3877 + edesc->drv_req.drv_ctx = drv_ctx;
3879 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
3882 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
3883 + qm_sg_index += mapped_src_nents;
3885 + if (mapped_dst_nents > 1)
3886 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
3889 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
3890 + if (dma_mapping_error(qidev, qm_sg_dma)) {
3891 + dev_err(qidev, "unable to map S/G table\n");
3892 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
3893 + ivsize, op_type, 0, 0);
3894 + qi_cache_free(edesc);
3895 + return ERR_PTR(-ENOMEM);
3898 + edesc->qm_sg_dma = qm_sg_dma;
3899 + edesc->qm_sg_bytes = qm_sg_bytes;
3901 + out_len = req->cryptlen + (encrypt ? authsize : 0);
3902 + in_len = ivsize + req->assoclen + req->cryptlen;
3904 + fd_sgt = &edesc->drv_req.fd_sgt[0];
3906 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
3908 + if (req->dst == req->src)
3909 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
3910 + (sg_nents_for_len(req->src, req->assoclen) +
3911 + 1) * sizeof(*sg_table), out_len, 0);
3912 + else if (mapped_dst_nents == 1)
3913 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
3915 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
3916 + qm_sg_index, out_len, 0);
3921 +static int tls_crypt(struct aead_request *req, bool encrypt)
3923 + struct tls_edesc *edesc;
3924 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
3925 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
3928 + if (unlikely(caam_congested))
3931 + edesc = tls_edesc_alloc(req, encrypt);
3932 + if (IS_ERR_OR_NULL(edesc))
3933 + return PTR_ERR(edesc);
3935 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
3937 + ret = -EINPROGRESS;
3939 + tls_unmap(ctx->qidev, edesc, req);
3940 + qi_cache_free(edesc);
3946 +static int tls_encrypt(struct aead_request *req)
3948 + return tls_crypt(req, true);
3951 +static int tls_decrypt(struct aead_request *req)
3953 + return tls_crypt(req, false);
3956 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
3958 struct ablkcipher_edesc *edesc;
3959 @@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
3962 static struct caam_aead_alg driver_aeads[] = {
3966 + .cra_name = "rfc4106(gcm(aes))",
3967 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
3968 + .cra_blocksize = 1,
3970 + .setkey = rfc4106_setkey,
3971 + .setauthsize = rfc4106_setauthsize,
3972 + .encrypt = ipsec_gcm_encrypt,
3973 + .decrypt = ipsec_gcm_decrypt,
3975 + .maxauthsize = AES_BLOCK_SIZE,
3978 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3984 + .cra_name = "rfc4543(gcm(aes))",
3985 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
3986 + .cra_blocksize = 1,
3988 + .setkey = rfc4543_setkey,
3989 + .setauthsize = rfc4543_setauthsize,
3990 + .encrypt = ipsec_gcm_encrypt,
3991 + .decrypt = ipsec_gcm_decrypt,
3993 + .maxauthsize = AES_BLOCK_SIZE,
3996 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3999 + /* Galois Counter Mode */
4003 + .cra_name = "gcm(aes)",
4004 + .cra_driver_name = "gcm-aes-caam-qi",
4005 + .cra_blocksize = 1,
4007 + .setkey = gcm_setkey,
4008 + .setauthsize = gcm_setauthsize,
4009 + .encrypt = aead_encrypt,
4010 + .decrypt = aead_decrypt,
4012 + .maxauthsize = AES_BLOCK_SIZE,
4015 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4018 /* single-pass ipsec_esp descriptor */
4021 @@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
4028 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
4029 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
4030 + .cra_blocksize = AES_BLOCK_SIZE,
4032 + .setkey = tls_setkey,
4033 + .setauthsize = tls_setauthsize,
4034 + .encrypt = tls_encrypt,
4035 + .decrypt = tls_decrypt,
4036 + .ivsize = AES_BLOCK_SIZE,
4037 + .maxauthsize = SHA1_DIGEST_SIZE,
4040 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
4041 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4042 + OP_ALG_AAI_HMAC_PRECOMP,
4047 struct caam_crypto_alg {
4048 @@ -2126,9 +2989,20 @@ struct caam_crypto_alg {
4049 struct caam_alg_entry caam;
4052 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4053 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
4056 struct caam_drv_private *priv;
4057 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
4058 + static const u8 digest_size[] = {
4061 + SHA224_DIGEST_SIZE,
4062 + SHA256_DIGEST_SIZE,
4063 + SHA384_DIGEST_SIZE,
4064 + SHA512_DIGEST_SIZE
4069 * distribute tfms across job rings to ensure in-order
4070 @@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_
4071 return PTR_ERR(ctx->jrdev);
4074 + priv = dev_get_drvdata(ctx->jrdev->parent);
4075 + if (priv->era >= 6 && uses_dkp)
4076 + ctx->dir = DMA_BIDIRECTIONAL;
4078 + ctx->dir = DMA_TO_DEVICE;
4080 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
4083 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
4084 dev_err(ctx->jrdev, "unable to map key\n");
4085 caam_jr_free(ctx->jrdev);
4086 @@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_
4087 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4088 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4090 - priv = dev_get_drvdata(ctx->jrdev->parent);
4091 + if (ctx->adata.algtype) {
4092 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
4093 + >> OP_ALG_ALGSEL_SHIFT;
4094 + if (op_id < ARRAY_SIZE(digest_size)) {
4095 + ctx->authsize = digest_size[op_id];
4097 + dev_err(ctx->jrdev,
4098 + "incorrect op_id %d; must be less than %zu\n",
4099 + op_id, ARRAY_SIZE(digest_size));
4100 + caam_jr_free(ctx->jrdev);
4104 + ctx->authsize = 0;
4107 ctx->qidev = priv->qidev;
4109 spin_lock_init(&ctx->lock);
4110 @@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t
4112 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4114 - return caam_init_common(ctx, &caam_alg->caam);
4115 + return caam_init_common(ctx, &caam_alg->caam, false);
4118 static int caam_aead_init(struct crypto_aead *tfm)
4119 @@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_
4121 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4123 - return caam_init_common(ctx, &caam_alg->caam);
4124 + return caam_init_common(ctx, &caam_alg->caam,
4125 + (alg->setkey == aead_setkey) ||
4126 + (alg->setkey == tls_setkey));
4129 static void caam_exit_common(struct caam_ctx *ctx)
4130 @@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam
4131 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
4132 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
4134 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
4136 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
4138 caam_jr_free(ctx->jrdev);
4140 @@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo
4141 if (!priv || !priv->qi_present)
4145 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
4149 INIT_LIST_HEAD(&alg_list);
4153 +++ b/drivers/crypto/caam/caamalg_qi2.c
4155 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
4157 + * Copyright 2015-2016 Freescale Semiconductor Inc.
4158 + * Copyright 2017-2018 NXP
4161 +#include <linux/fsl/mc.h>
4162 +#include "compat.h"
4164 +#include "caamalg_qi2.h"
4165 +#include "dpseci_cmd.h"
4166 +#include "desc_constr.h"
4168 +#include "sg_sw_sec4.h"
4169 +#include "sg_sw_qm2.h"
4170 +#include "key_gen.h"
4171 +#include "caamalg_desc.h"
4172 +#include "caamhash_desc.h"
4173 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
4174 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
4176 +#define CAAM_CRA_PRIORITY 2000
4178 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
4179 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
4180 + SHA512_DIGEST_SIZE * 2)
4182 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
4183 +bool caam_little_end;
4184 +EXPORT_SYMBOL(caam_little_end);
4186 +EXPORT_SYMBOL(caam_imx);
4190 + * This is a cache of buffers, from which the users of CAAM QI driver
4191 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
4192 + * NOTE: A more elegant solution would be to have some headroom in the frames
4193 + * being processed. This can be added by the dpaa2-eth driver. This would
4194 + * pose a problem for userspace application processing which cannot
4195 + * know of this limitation. So for now, this will work.
4196 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
4198 +static struct kmem_cache *qi_cache;
4200 +struct caam_alg_entry {
4201 + struct device *dev;
4202 + int class1_alg_type;
4203 + int class2_alg_type;
4208 +struct caam_aead_alg {
4209 + struct aead_alg aead;
4210 + struct caam_alg_entry caam;
4214 +struct caam_skcipher_alg {
4215 + struct skcipher_alg skcipher;
4216 + struct caam_alg_entry caam;
4221 + * caam_ctx - per-session context
4222 + * @flc: Flow Contexts array
4223 + * @key: virtual address of the key(s): [authentication key], encryption key
4224 + * @flc_dma: I/O virtual addresses of the Flow Contexts
4225 + * @key_dma: I/O virtual address of the key
4226 + * @dir: DMA direction for mapping key and Flow Contexts
4227 + * @dev: dpseci device
4228 + * @adata: authentication algorithm details
4229 + * @cdata: encryption algorithm details
4230 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
4233 + struct caam_flc flc[NUM_OP];
4234 + u8 key[CAAM_MAX_KEY_SIZE];
4235 + dma_addr_t flc_dma[NUM_OP];
4236 + dma_addr_t key_dma;
4237 + enum dma_data_direction dir;
4238 + struct device *dev;
4239 + struct alginfo adata;
4240 + struct alginfo cdata;
4241 + unsigned int authsize;
4244 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
4245 + dma_addr_t iova_addr)
4247 + phys_addr_t phys_addr;
4249 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
4252 + return phys_to_virt(phys_addr);
4256 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
4258 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
4259 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
4260 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
4261 + * hosting 16 SG entries.
4263 + * @flags - flags that would be used for the equivalent kmalloc(..) call
4265 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
4267 +static inline void *qi_cache_zalloc(gfp_t flags)
4269 + return kmem_cache_zalloc(qi_cache, flags);
4273 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
4275 + * @obj - buffer previously allocated by qi_cache_zalloc
4277 + * No checking is being done, the call is a passthrough call to
4278 + * kmem_cache_free(...)
4280 +static inline void qi_cache_free(void *obj)
4282 + kmem_cache_free(qi_cache, obj);
4285 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
4287 + switch (crypto_tfm_alg_type(areq->tfm)) {
4288 + case CRYPTO_ALG_TYPE_SKCIPHER:
4289 + return skcipher_request_ctx(skcipher_request_cast(areq));
4290 + case CRYPTO_ALG_TYPE_AEAD:
4291 + return aead_request_ctx(container_of(areq, struct aead_request,
4293 + case CRYPTO_ALG_TYPE_AHASH:
4294 + return ahash_request_ctx(ahash_request_cast(areq));
4296 + return ERR_PTR(-EINVAL);
4300 +static void caam_unmap(struct device *dev, struct scatterlist *src,
4301 + struct scatterlist *dst, int src_nents,
4302 + int dst_nents, dma_addr_t iv_dma, int ivsize,
4303 + dma_addr_t qm_sg_dma, int qm_sg_bytes)
4307 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
4308 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
4310 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
4314 + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
4317 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
4320 +static int aead_set_sh_desc(struct crypto_aead *aead)
4322 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4323 + typeof(*alg), aead);
4324 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4325 + unsigned int ivsize = crypto_aead_ivsize(aead);
4326 + struct device *dev = ctx->dev;
4327 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
4328 + struct caam_flc *flc;
4330 + u32 ctx1_iv_off = 0;
4331 + u32 *nonce = NULL;
4332 + unsigned int data_len[2];
4334 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
4335 + OP_ALG_AAI_CTR_MOD128);
4336 + const bool is_rfc3686 = alg->caam.rfc3686;
4338 + if (!ctx->cdata.keylen || !ctx->authsize)
4342 + * AES-CTR needs to load IV in CONTEXT1 reg
4343 + * at an offset of 128bits (16bytes)
4344 + * CONTEXT1[255:128] = IV
4350 + * RFC3686 specific:
4351 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
4354 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
4355 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
4356 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
4359 + data_len[0] = ctx->adata.keylen_pad;
4360 + data_len[1] = ctx->cdata.keylen;
4362 + /* aead_encrypt shared descriptor */
4363 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
4364 + DESC_QI_AEAD_ENC_LEN) +
4365 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
4366 + DESC_JOB_IO_LEN, data_len, &inl_mask,
4367 + ARRAY_SIZE(data_len)) < 0)
4371 + ctx->adata.key_virt = ctx->key;
4373 + ctx->adata.key_dma = ctx->key_dma;
4376 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
4378 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
4380 + ctx->adata.key_inline = !!(inl_mask & 1);
4381 + ctx->cdata.key_inline = !!(inl_mask & 2);
4383 + flc = &ctx->flc[ENCRYPT];
4384 + desc = flc->sh_desc;
4386 + if (alg->caam.geniv)
4387 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
4388 + ivsize, ctx->authsize, is_rfc3686,
4389 + nonce, ctx1_iv_off, true,
4390 + priv->sec_attr.era);
4392 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
4393 + ivsize, ctx->authsize, is_rfc3686, nonce,
4394 + ctx1_iv_off, true, priv->sec_attr.era);
4396 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
4397 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
4398 + sizeof(flc->flc) + desc_bytes(desc),
4401 + /* aead_decrypt shared descriptor */
4402 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
4403 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
4404 + DESC_JOB_IO_LEN, data_len, &inl_mask,
4405 + ARRAY_SIZE(data_len)) < 0)
4409 + ctx->adata.key_virt = ctx->key;
4411 + ctx->adata.key_dma = ctx->key_dma;
4414 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
4416 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
4418 + ctx->adata.key_inline = !!(inl_mask & 1);
4419 + ctx->cdata.key_inline = !!(inl_mask & 2);
4421 + flc = &ctx->flc[DECRYPT];
4422 + desc = flc->sh_desc;
4423 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
4424 + ivsize, ctx->authsize, alg->caam.geniv,
4425 + is_rfc3686, nonce, ctx1_iv_off, true,
4426 + priv->sec_attr.era);
4427 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
4428 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
4429 + sizeof(flc->flc) + desc_bytes(desc),
4435 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
4437 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4439 + ctx->authsize = authsize;
4440 + aead_set_sh_desc(authenc);
4445 +struct split_key_sh_result {
4446 + struct completion completion;
4448 + struct device *dev;
4451 +static void split_key_sh_done(void *cbk_ctx, u32 err)
4453 + struct split_key_sh_result *res = cbk_ctx;
4456 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
4460 + caam_qi2_strstatus(res->dev, err);
4463 + complete(&res->completion);
4466 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
4467 + unsigned int keylen)
4469 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4470 + struct device *dev = ctx->dev;
4471 + struct crypto_authenc_keys keys;
4473 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
4477 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
4478 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
4480 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4481 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4484 + ctx->adata.keylen = keys.authkeylen;
4485 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
4486 + OP_ALG_ALGSEL_MASK);
4488 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
4491 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
4492 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
4493 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
4494 + keys.enckeylen, ctx->dir);
4496 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
4497 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
4498 + ctx->adata.keylen_pad + keys.enckeylen, 1);
4501 + ctx->cdata.keylen = keys.enckeylen;
4503 + return aead_set_sh_desc(aead);
4505 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
4509 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
4512 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4513 + struct caam_request *req_ctx = aead_request_ctx(req);
4514 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4515 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4516 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4517 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4518 + typeof(*alg), aead);
4519 + struct device *dev = ctx->dev;
4520 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4521 + GFP_KERNEL : GFP_ATOMIC;
4522 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4523 + struct aead_edesc *edesc;
4524 + dma_addr_t qm_sg_dma, iv_dma = 0;
4526 + unsigned int authsize = ctx->authsize;
4527 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
4528 + int in_len, out_len;
4529 + struct dpaa2_sg_entry *sg_table;
4531 + /* allocate space for base edesc, link tables and IV */
4532 + edesc = qi_cache_zalloc(GFP_DMA | flags);
4533 + if (unlikely(!edesc)) {
4534 + dev_err(dev, "could not allocate extended descriptor\n");
4535 + return ERR_PTR(-ENOMEM);
4538 + if (unlikely(req->dst != req->src)) {
4539 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4541 + if (unlikely(src_nents < 0)) {
4542 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
4543 + req->assoclen + req->cryptlen);
4544 + qi_cache_free(edesc);
4545 + return ERR_PTR(src_nents);
4548 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
4550 + (encrypt ? authsize :
4552 + if (unlikely(dst_nents < 0)) {
4553 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
4554 + req->assoclen + req->cryptlen +
4555 + (encrypt ? authsize : (-authsize)));
4556 + qi_cache_free(edesc);
4557 + return ERR_PTR(dst_nents);
4561 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
4563 + if (unlikely(!mapped_src_nents)) {
4564 + dev_err(dev, "unable to map source\n");
4565 + qi_cache_free(edesc);
4566 + return ERR_PTR(-ENOMEM);
4569 + mapped_src_nents = 0;
4572 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
4574 + if (unlikely(!mapped_dst_nents)) {
4575 + dev_err(dev, "unable to map destination\n");
4576 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
4577 + qi_cache_free(edesc);
4578 + return ERR_PTR(-ENOMEM);
4581 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4583 + (encrypt ? authsize : 0));
4584 + if (unlikely(src_nents < 0)) {
4585 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
4586 + req->assoclen + req->cryptlen +
4587 + (encrypt ? authsize : 0));
4588 + qi_cache_free(edesc);
4589 + return ERR_PTR(src_nents);
4592 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
4593 + DMA_BIDIRECTIONAL);
4594 + if (unlikely(!mapped_src_nents)) {
4595 + dev_err(dev, "unable to map source\n");
4596 + qi_cache_free(edesc);
4597 + return ERR_PTR(-ENOMEM);
4601 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
4602 + ivsize = crypto_aead_ivsize(aead);
4605 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
4606 + * Input is not contiguous.
4608 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
4609 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4610 + sg_table = &edesc->sgt[0];
4611 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
4612 + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
4613 + CAAM_QI_MEMCACHE_SIZE)) {
4614 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
4615 + qm_sg_nents, ivsize);
4616 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
4618 + qi_cache_free(edesc);
4619 + return ERR_PTR(-ENOMEM);
4623 + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
4625 + /* Make sure IV is located in a DMAable area */
4626 + memcpy(iv, req->iv, ivsize);
4628 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
4629 + if (dma_mapping_error(dev, iv_dma)) {
4630 + dev_err(dev, "unable to map IV\n");
4631 + caam_unmap(dev, req->src, req->dst, src_nents,
4632 + dst_nents, 0, 0, 0, 0);
4633 + qi_cache_free(edesc);
4634 + return ERR_PTR(-ENOMEM);
4638 + edesc->src_nents = src_nents;
4639 + edesc->dst_nents = dst_nents;
4640 + edesc->iv_dma = iv_dma;
4642 + edesc->assoclen = cpu_to_caam32(req->assoclen);
4643 + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
4645 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
4646 + dev_err(dev, "unable to map assoclen\n");
4647 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
4648 + iv_dma, ivsize, 0, 0);
4649 + qi_cache_free(edesc);
4650 + return ERR_PTR(-ENOMEM);
4653 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
4656 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
4659 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4660 + qm_sg_index += mapped_src_nents;
4662 + if (mapped_dst_nents > 1)
4663 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
4666 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4667 + if (dma_mapping_error(dev, qm_sg_dma)) {
4668 + dev_err(dev, "unable to map S/G table\n");
4669 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
4670 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
4671 + iv_dma, ivsize, 0, 0);
4672 + qi_cache_free(edesc);
4673 + return ERR_PTR(-ENOMEM);
4676 + edesc->qm_sg_dma = qm_sg_dma;
4677 + edesc->qm_sg_bytes = qm_sg_bytes;
4679 + out_len = req->assoclen + req->cryptlen +
4680 + (encrypt ? ctx->authsize : (-ctx->authsize));
4681 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
4683 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4684 + dpaa2_fl_set_final(in_fle, true);
4685 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4686 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
4687 + dpaa2_fl_set_len(in_fle, in_len);
4689 + if (req->dst == req->src) {
4690 + if (mapped_src_nents == 1) {
4691 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4692 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
4694 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
4695 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
4696 + (1 + !!ivsize) * sizeof(*sg_table));
4698 + } else if (mapped_dst_nents == 1) {
4699 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4700 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
4702 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
4703 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
4704 + sizeof(*sg_table));
4707 + dpaa2_fl_set_len(out_fle, out_len);
4712 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
4715 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
4716 + unsigned int blocksize = crypto_aead_blocksize(tls);
4717 + unsigned int padsize, authsize;
4718 + struct caam_request *req_ctx = aead_request_ctx(req);
4719 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4720 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4721 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
4722 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
4723 + typeof(*alg), aead);
4724 + struct device *dev = ctx->dev;
4725 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4726 + GFP_KERNEL : GFP_ATOMIC;
4727 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4728 + struct tls_edesc *edesc;
4729 + dma_addr_t qm_sg_dma, iv_dma = 0;
4732 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
4733 + int in_len, out_len;
4734 + struct dpaa2_sg_entry *sg_table;
4735 + struct scatterlist *dst;
4738 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
4740 + authsize = ctx->authsize + padsize;
4742 + authsize = ctx->authsize;
4745 + /* allocate space for base edesc, link tables and IV */
4746 + edesc = qi_cache_zalloc(GFP_DMA | flags);
4747 + if (unlikely(!edesc)) {
4748 + dev_err(dev, "could not allocate extended descriptor\n");
4749 + return ERR_PTR(-ENOMEM);
4752 + if (likely(req->src == req->dst)) {
4753 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4755 + (encrypt ? authsize : 0));
4756 + if (unlikely(src_nents < 0)) {
4757 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
4758 + req->assoclen + req->cryptlen +
4759 + (encrypt ? authsize : 0));
4760 + qi_cache_free(edesc);
4761 + return ERR_PTR(src_nents);
4764 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
4765 + DMA_BIDIRECTIONAL);
4766 + if (unlikely(!mapped_src_nents)) {
4767 + dev_err(dev, "unable to map source\n");
4768 + qi_cache_free(edesc);
4769 + return ERR_PTR(-ENOMEM);
4773 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4775 + if (unlikely(src_nents < 0)) {
4776 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
4777 + req->assoclen + req->cryptlen);
4778 + qi_cache_free(edesc);
4779 + return ERR_PTR(src_nents);
4782 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
4783 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
4784 + (encrypt ? authsize : 0));
4785 + if (unlikely(dst_nents < 0)) {
4786 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
4788 + (encrypt ? authsize : 0));
4789 + qi_cache_free(edesc);
4790 + return ERR_PTR(dst_nents);
4794 + mapped_src_nents = dma_map_sg(dev, req->src,
4795 + src_nents, DMA_TO_DEVICE);
4796 + if (unlikely(!mapped_src_nents)) {
4797 + dev_err(dev, "unable to map source\n");
4798 + qi_cache_free(edesc);
4799 + return ERR_PTR(-ENOMEM);
4802 + mapped_src_nents = 0;
4805 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
4807 + if (unlikely(!mapped_dst_nents)) {
4808 + dev_err(dev, "unable to map destination\n");
4809 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
4810 + qi_cache_free(edesc);
4811 + return ERR_PTR(-ENOMEM);
4816 + * Create S/G table: IV, src, dst.
4817 + * Input is not contiguous.
4819 + qm_sg_ents = 1 + mapped_src_nents +
4820 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4821 + sg_table = &edesc->sgt[0];
4822 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4824 + ivsize = crypto_aead_ivsize(tls);
4825 + iv = (u8 *)(sg_table + qm_sg_ents);
4826 + /* Make sure IV is located in a DMAable area */
4827 + memcpy(iv, req->iv, ivsize);
4828 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
4829 + if (dma_mapping_error(dev, iv_dma)) {
4830 + dev_err(dev, "unable to map IV\n");
4831 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
4833 + qi_cache_free(edesc);
4834 + return ERR_PTR(-ENOMEM);
4837 + edesc->src_nents = src_nents;
4838 + edesc->dst_nents = dst_nents;
4840 + edesc->iv_dma = iv_dma;
4842 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
4845 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4846 + qm_sg_index += mapped_src_nents;
4848 + if (mapped_dst_nents > 1)
4849 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
4852 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4853 + if (dma_mapping_error(dev, qm_sg_dma)) {
4854 + dev_err(dev, "unable to map S/G table\n");
4855 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
4857 + qi_cache_free(edesc);
4858 + return ERR_PTR(-ENOMEM);
4861 + edesc->qm_sg_dma = qm_sg_dma;
4862 + edesc->qm_sg_bytes = qm_sg_bytes;
4864 + out_len = req->cryptlen + (encrypt ? authsize : 0);
4865 + in_len = ivsize + req->assoclen + req->cryptlen;
4867 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4868 + dpaa2_fl_set_final(in_fle, true);
4869 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4870 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
4871 + dpaa2_fl_set_len(in_fle, in_len);
4873 + if (req->dst == req->src) {
4874 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
4875 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
4876 + (sg_nents_for_len(req->src, req->assoclen) +
4877 + 1) * sizeof(*sg_table));
4878 + } else if (mapped_dst_nents == 1) {
4879 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4880 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
4882 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
4883 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
4884 + sizeof(*sg_table));
4887 + dpaa2_fl_set_len(out_fle, out_len);
4892 +static int tls_set_sh_desc(struct crypto_aead *tls)
4894 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
4895 + unsigned int ivsize = crypto_aead_ivsize(tls);
4896 + unsigned int blocksize = crypto_aead_blocksize(tls);
4897 + struct device *dev = ctx->dev;
4898 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
4899 + struct caam_flc *flc;
4901 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
4902 + unsigned int data_len[2];
4905 + if (!ctx->cdata.keylen || !ctx->authsize)
4909 + * TLS 1.0 encrypt shared descriptor
4910 + * Job Descriptor and Shared Descriptor
4911 + * must fit into the 64-word Descriptor h/w Buffer
4913 + data_len[0] = ctx->adata.keylen_pad;
4914 + data_len[1] = ctx->cdata.keylen;
4916 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
4917 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
4921 + ctx->adata.key_virt = ctx->key;
4923 + ctx->adata.key_dma = ctx->key_dma;
4926 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
4928 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
4930 + ctx->adata.key_inline = !!(inl_mask & 1);
4931 + ctx->cdata.key_inline = !!(inl_mask & 2);
4933 + flc = &ctx->flc[ENCRYPT];
4934 + desc = flc->sh_desc;
4935 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
4936 + assoclen, ivsize, ctx->authsize, blocksize,
4937 + priv->sec_attr.era);
4938 + flc->flc[1] = cpu_to_caam32(desc_len(desc));
4939 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
4940 + sizeof(flc->flc) + desc_bytes(desc),
4944 + * TLS 1.0 decrypt shared descriptor
4945 + * Keys do not fit inline, regardless of algorithms used
4947 + ctx->adata.key_inline = false;
4948 + ctx->adata.key_dma = ctx->key_dma;
4949 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
4951 + flc = &ctx->flc[DECRYPT];
4952 + desc = flc->sh_desc;
4953 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
4954 + ctx->authsize, blocksize, priv->sec_attr.era);
4955 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
4956 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
4957 + sizeof(flc->flc) + desc_bytes(desc),
4963 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
4964 + unsigned int keylen)
4966 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
4967 + struct device *dev = ctx->dev;
4968 + struct crypto_authenc_keys keys;
4970 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
4974 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
4975 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
4977 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4978 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4981 + ctx->adata.keylen = keys.authkeylen;
4982 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
4983 + OP_ALG_ALGSEL_MASK);
4985 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
4988 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
4989 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
4990 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
4991 + keys.enckeylen, ctx->dir);
4993 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
4994 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
4995 + ctx->adata.keylen_pad + keys.enckeylen, 1);
4998 + ctx->cdata.keylen = keys.enckeylen;
5000 + return tls_set_sh_desc(tls);
5002 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
5006 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
5008 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5010 + ctx->authsize = authsize;
5011 + tls_set_sh_desc(tls);
5016 +static int gcm_set_sh_desc(struct crypto_aead *aead)
5018 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5019 + struct device *dev = ctx->dev;
5020 + unsigned int ivsize = crypto_aead_ivsize(aead);
5021 + struct caam_flc *flc;
5023 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5024 + ctx->cdata.keylen;
5026 + if (!ctx->cdata.keylen || !ctx->authsize)
5030 + * AES GCM encrypt shared descriptor
5031 + * Job Descriptor and Shared Descriptor
5032 + * must fit into the 64-word Descriptor h/w Buffer
5034 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
5035 + ctx->cdata.key_inline = true;
5036 + ctx->cdata.key_virt = ctx->key;
5038 + ctx->cdata.key_inline = false;
5039 + ctx->cdata.key_dma = ctx->key_dma;
5042 + flc = &ctx->flc[ENCRYPT];
5043 + desc = flc->sh_desc;
5044 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5045 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5046 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5047 + sizeof(flc->flc) + desc_bytes(desc),
5051 + * Job Descriptor and Shared Descriptors
5052 + * must all fit into the 64-word Descriptor h/w Buffer
5054 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
5055 + ctx->cdata.key_inline = true;
5056 + ctx->cdata.key_virt = ctx->key;
5058 + ctx->cdata.key_inline = false;
5059 + ctx->cdata.key_dma = ctx->key_dma;
5062 + flc = &ctx->flc[DECRYPT];
5063 + desc = flc->sh_desc;
5064 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5065 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5066 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5067 + sizeof(flc->flc) + desc_bytes(desc),
5073 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
5075 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5077 + ctx->authsize = authsize;
5078 + gcm_set_sh_desc(authenc);
5083 +static int gcm_setkey(struct crypto_aead *aead,
5084 + const u8 *key, unsigned int keylen)
5086 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5087 + struct device *dev = ctx->dev;
5090 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5091 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5094 + memcpy(ctx->key, key, keylen);
5095 + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
5096 + ctx->cdata.keylen = keylen;
5098 + return gcm_set_sh_desc(aead);
5101 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
5103 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5104 + struct device *dev = ctx->dev;
5105 + unsigned int ivsize = crypto_aead_ivsize(aead);
5106 + struct caam_flc *flc;
5108 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5109 + ctx->cdata.keylen;
5111 + if (!ctx->cdata.keylen || !ctx->authsize)
5114 + ctx->cdata.key_virt = ctx->key;
5117 + * RFC4106 encrypt shared descriptor
5118 + * Job Descriptor and Shared Descriptor
5119 + * must fit into the 64-word Descriptor h/w Buffer
5121 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
5122 + ctx->cdata.key_inline = true;
5124 + ctx->cdata.key_inline = false;
5125 + ctx->cdata.key_dma = ctx->key_dma;
5128 + flc = &ctx->flc[ENCRYPT];
5129 + desc = flc->sh_desc;
5130 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
5132 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5133 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5134 + sizeof(flc->flc) + desc_bytes(desc),
5138 + * Job Descriptor and Shared Descriptors
5139 + * must all fit into the 64-word Descriptor h/w Buffer
5141 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
5142 + ctx->cdata.key_inline = true;
5144 + ctx->cdata.key_inline = false;
5145 + ctx->cdata.key_dma = ctx->key_dma;
5148 + flc = &ctx->flc[DECRYPT];
5149 + desc = flc->sh_desc;
5150 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
5152 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5153 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5154 + sizeof(flc->flc) + desc_bytes(desc),
5160 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
5161 + unsigned int authsize)
5163 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5165 + ctx->authsize = authsize;
5166 + rfc4106_set_sh_desc(authenc);
5171 +static int rfc4106_setkey(struct crypto_aead *aead,
5172 + const u8 *key, unsigned int keylen)
5174 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5175 + struct device *dev = ctx->dev;
5181 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5182 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5185 + memcpy(ctx->key, key, keylen);
5187 + * The last four bytes of the key material are used as the salt value
5188 + * in the nonce. Update the AES key length.
5190 + ctx->cdata.keylen = keylen - 4;
5191 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
5194 + return rfc4106_set_sh_desc(aead);
5197 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
5199 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5200 + struct device *dev = ctx->dev;
5201 + unsigned int ivsize = crypto_aead_ivsize(aead);
5202 + struct caam_flc *flc;
5204 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5205 + ctx->cdata.keylen;
5207 + if (!ctx->cdata.keylen || !ctx->authsize)
5210 + ctx->cdata.key_virt = ctx->key;
5213 + * RFC4543 encrypt shared descriptor
5214 + * Job Descriptor and Shared Descriptor
5215 + * must fit into the 64-word Descriptor h/w Buffer
5217 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
5218 + ctx->cdata.key_inline = true;
5220 + ctx->cdata.key_inline = false;
5221 + ctx->cdata.key_dma = ctx->key_dma;
5224 + flc = &ctx->flc[ENCRYPT];
5225 + desc = flc->sh_desc;
5226 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
5228 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5229 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5230 + sizeof(flc->flc) + desc_bytes(desc),
5234 + * Job Descriptor and Shared Descriptors
5235 + * must all fit into the 64-word Descriptor h/w Buffer
5237 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
5238 + ctx->cdata.key_inline = true;
5240 + ctx->cdata.key_inline = false;
5241 + ctx->cdata.key_dma = ctx->key_dma;
5244 + flc = &ctx->flc[DECRYPT];
5245 + desc = flc->sh_desc;
5246 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
5248 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5249 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5250 + sizeof(flc->flc) + desc_bytes(desc),
5256 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
5257 + unsigned int authsize)
5259 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5261 + ctx->authsize = authsize;
5262 + rfc4543_set_sh_desc(authenc);
5267 +static int rfc4543_setkey(struct crypto_aead *aead,
5268 + const u8 *key, unsigned int keylen)
5270 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5271 + struct device *dev = ctx->dev;
5277 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5278 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5281 + memcpy(ctx->key, key, keylen);
5283 + * The last four bytes of the key material are used as the salt value
5284 + * in the nonce. Update the AES key length.
5286 + ctx->cdata.keylen = keylen - 4;
5287 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
5290 + return rfc4543_set_sh_desc(aead);
5293 +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
5294 + unsigned int keylen)
5296 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5297 + struct caam_skcipher_alg *alg =
5298 + container_of(crypto_skcipher_alg(skcipher),
5299 + struct caam_skcipher_alg, skcipher);
5300 + struct device *dev = ctx->dev;
5301 + struct caam_flc *flc;
5302 + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
5304 + u32 ctx1_iv_off = 0;
5305 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
5306 + OP_ALG_AAI_CTR_MOD128);
5307 + const bool is_rfc3686 = alg->caam.rfc3686;
5310 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5311 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5314 + * AES-CTR needs to load IV in CONTEXT1 reg
5315 + * at an offset of 128bits (16bytes)
5316 + * CONTEXT1[255:128] = IV
5322 + * RFC3686 specific:
5323 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
5324 + * | *key = {KEY, NONCE}
5327 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
5328 + keylen -= CTR_RFC3686_NONCE_SIZE;
5331 + ctx->cdata.keylen = keylen;
5332 + ctx->cdata.key_virt = key;
5333 + ctx->cdata.key_inline = true;
5335 + /* skcipher_encrypt shared descriptor */
5336 + flc = &ctx->flc[ENCRYPT];
5337 + desc = flc->sh_desc;
5338 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
5339 + is_rfc3686, ctx1_iv_off);
5340 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5341 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5342 + sizeof(flc->flc) + desc_bytes(desc),
5345 + /* skcipher_decrypt shared descriptor */
5346 + flc = &ctx->flc[DECRYPT];
5347 + desc = flc->sh_desc;
5348 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
5349 + is_rfc3686, ctx1_iv_off);
5350 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5351 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5352 + sizeof(flc->flc) + desc_bytes(desc),
5358 +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
5359 + unsigned int keylen)
5361 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5362 + struct device *dev = ctx->dev;
5363 + struct caam_flc *flc;
5366 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
5367 + dev_err(dev, "key size mismatch\n");
5368 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
5372 + ctx->cdata.keylen = keylen;
5373 + ctx->cdata.key_virt = key;
5374 + ctx->cdata.key_inline = true;
5376 + /* xts_skcipher_encrypt shared descriptor */
5377 + flc = &ctx->flc[ENCRYPT];
5378 + desc = flc->sh_desc;
5379 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
5380 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5381 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5382 + sizeof(flc->flc) + desc_bytes(desc),
5385 + /* xts_skcipher_decrypt shared descriptor */
5386 + flc = &ctx->flc[DECRYPT];
5387 + desc = flc->sh_desc;
5388 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
5389 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5390 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5391 + sizeof(flc->flc) + desc_bytes(desc),
5397 +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
5399 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5400 + struct caam_request *req_ctx = skcipher_request_ctx(req);
5401 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5402 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5403 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5404 + struct device *dev = ctx->dev;
5405 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5406 + GFP_KERNEL : GFP_ATOMIC;
5407 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5408 + struct skcipher_edesc *edesc;
5409 + dma_addr_t iv_dma;
5411 + int ivsize = crypto_skcipher_ivsize(skcipher);
5412 + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
5413 + struct dpaa2_sg_entry *sg_table;
5415 + src_nents = sg_nents_for_len(req->src, req->cryptlen);
5416 + if (unlikely(src_nents < 0)) {
5417 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5419 + return ERR_PTR(src_nents);
5422 + if (unlikely(req->dst != req->src)) {
5423 + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
5424 + if (unlikely(dst_nents < 0)) {
5425 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5427 + return ERR_PTR(dst_nents);
5430 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5432 + if (unlikely(!mapped_src_nents)) {
5433 + dev_err(dev, "unable to map source\n");
5434 + return ERR_PTR(-ENOMEM);
5437 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
5439 + if (unlikely(!mapped_dst_nents)) {
5440 + dev_err(dev, "unable to map destination\n");
5441 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5442 + return ERR_PTR(-ENOMEM);
5445 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5446 + DMA_BIDIRECTIONAL);
5447 + if (unlikely(!mapped_src_nents)) {
5448 + dev_err(dev, "unable to map source\n");
5449 + return ERR_PTR(-ENOMEM);
5453 + qm_sg_ents = 1 + mapped_src_nents;
5454 + dst_sg_idx = qm_sg_ents;
5456 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
5457 + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
5458 + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
5459 + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
5460 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
5461 + qm_sg_ents, ivsize);
5462 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5464 + return ERR_PTR(-ENOMEM);
5467 + /* allocate space for base edesc, link tables and IV */
5468 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5469 + if (unlikely(!edesc)) {
5470 + dev_err(dev, "could not allocate extended descriptor\n");
5471 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5473 + return ERR_PTR(-ENOMEM);
5476 + /* Make sure IV is located in a DMAable area */
5477 + sg_table = &edesc->sgt[0];
5478 + iv = (u8 *)(sg_table + qm_sg_ents);
5479 + memcpy(iv, req->iv, ivsize);
5481 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5482 + if (dma_mapping_error(dev, iv_dma)) {
5483 + dev_err(dev, "unable to map IV\n");
5484 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5486 + qi_cache_free(edesc);
5487 + return ERR_PTR(-ENOMEM);
5490 + edesc->src_nents = src_nents;
5491 + edesc->dst_nents = dst_nents;
5492 + edesc->iv_dma = iv_dma;
5493 + edesc->qm_sg_bytes = qm_sg_bytes;
5495 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
5496 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
5498 + if (mapped_dst_nents > 1)
5499 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
5502 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
5504 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
5505 + dev_err(dev, "unable to map S/G table\n");
5506 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5507 + iv_dma, ivsize, 0, 0);
5508 + qi_cache_free(edesc);
5509 + return ERR_PTR(-ENOMEM);
5512 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5513 + dpaa2_fl_set_final(in_fle, true);
5514 + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
5515 + dpaa2_fl_set_len(out_fle, req->cryptlen);
5517 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5518 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
5520 + if (req->src == req->dst) {
5521 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5522 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
5523 + sizeof(*sg_table));
5524 + } else if (mapped_dst_nents > 1) {
5525 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5526 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
5527 + sizeof(*sg_table));
5529 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5530 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
5536 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
5537 + struct aead_request *req)
5539 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5540 + int ivsize = crypto_aead_ivsize(aead);
5542 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
5543 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
5544 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
5547 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
5548 + struct aead_request *req)
5550 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5551 + int ivsize = crypto_aead_ivsize(tls);
5553 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
5554 + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
5555 + edesc->qm_sg_bytes);
5558 +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
5559 + struct skcipher_request *req)
5561 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5562 + int ivsize = crypto_skcipher_ivsize(skcipher);
5564 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
5565 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
5568 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
5570 + struct crypto_async_request *areq = cbk_ctx;
5571 + struct aead_request *req = container_of(areq, struct aead_request,
5573 + struct caam_request *req_ctx = to_caam_req(areq);
5574 + struct aead_edesc *edesc = req_ctx->edesc;
5575 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5576 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5580 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5583 + if (unlikely(status)) {
5584 + caam_qi2_strstatus(ctx->dev, status);
5588 + aead_unmap(ctx->dev, edesc, req);
5589 + qi_cache_free(edesc);
5590 + aead_request_complete(req, ecode);
5593 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
5595 + struct crypto_async_request *areq = cbk_ctx;
5596 + struct aead_request *req = container_of(areq, struct aead_request,
5598 + struct caam_request *req_ctx = to_caam_req(areq);
5599 + struct aead_edesc *edesc = req_ctx->edesc;
5600 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5601 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5605 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5608 + if (unlikely(status)) {
5609 + caam_qi2_strstatus(ctx->dev, status);
5611 + * verify hw auth check passed else return -EBADMSG
5613 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
5614 + JRSTA_CCBERR_ERRID_ICVCHK)
5620 + aead_unmap(ctx->dev, edesc, req);
5621 + qi_cache_free(edesc);
5622 + aead_request_complete(req, ecode);
5625 +static int aead_encrypt(struct aead_request *req)
5627 + struct aead_edesc *edesc;
5628 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5629 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5630 + struct caam_request *caam_req = aead_request_ctx(req);
5633 + /* allocate extended descriptor */
5634 + edesc = aead_edesc_alloc(req, true);
5635 + if (IS_ERR(edesc))
5636 + return PTR_ERR(edesc);
5638 + caam_req->flc = &ctx->flc[ENCRYPT];
5639 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
5640 + caam_req->cbk = aead_encrypt_done;
5641 + caam_req->ctx = &req->base;
5642 + caam_req->edesc = edesc;
5643 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5644 + if (ret != -EINPROGRESS &&
5645 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5646 + aead_unmap(ctx->dev, edesc, req);
5647 + qi_cache_free(edesc);
5653 +static int aead_decrypt(struct aead_request *req)
5655 + struct aead_edesc *edesc;
5656 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5657 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5658 + struct caam_request *caam_req = aead_request_ctx(req);
5661 + /* allocate extended descriptor */
5662 + edesc = aead_edesc_alloc(req, false);
5663 + if (IS_ERR(edesc))
5664 + return PTR_ERR(edesc);
5666 + caam_req->flc = &ctx->flc[DECRYPT];
5667 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
5668 + caam_req->cbk = aead_decrypt_done;
5669 + caam_req->ctx = &req->base;
5670 + caam_req->edesc = edesc;
5671 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5672 + if (ret != -EINPROGRESS &&
5673 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5674 + aead_unmap(ctx->dev, edesc, req);
5675 + qi_cache_free(edesc);
5681 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
5683 + struct crypto_async_request *areq = cbk_ctx;
5684 + struct aead_request *req = container_of(areq, struct aead_request,
5686 + struct caam_request *req_ctx = to_caam_req(areq);
5687 + struct tls_edesc *edesc = req_ctx->edesc;
5688 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5689 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5693 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5696 + if (unlikely(status)) {
5697 + caam_qi2_strstatus(ctx->dev, status);
5701 + tls_unmap(ctx->dev, edesc, req);
5702 + qi_cache_free(edesc);
5703 + aead_request_complete(req, ecode);
5706 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
5708 + struct crypto_async_request *areq = cbk_ctx;
5709 + struct aead_request *req = container_of(areq, struct aead_request,
5711 + struct caam_request *req_ctx = to_caam_req(areq);
5712 + struct tls_edesc *edesc = req_ctx->edesc;
5713 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5714 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5718 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5721 + if (unlikely(status)) {
5722 + caam_qi2_strstatus(ctx->dev, status);
5724 + * verify hw auth check passed else return -EBADMSG
5726 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
5727 + JRSTA_CCBERR_ERRID_ICVCHK)
5733 + tls_unmap(ctx->dev, edesc, req);
5734 + qi_cache_free(edesc);
5735 + aead_request_complete(req, ecode);
5738 +static int tls_encrypt(struct aead_request *req)
5740 + struct tls_edesc *edesc;
5741 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5742 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5743 + struct caam_request *caam_req = aead_request_ctx(req);
5746 + /* allocate extended descriptor */
5747 + edesc = tls_edesc_alloc(req, true);
5748 + if (IS_ERR(edesc))
5749 + return PTR_ERR(edesc);
5751 + caam_req->flc = &ctx->flc[ENCRYPT];
5752 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
5753 + caam_req->cbk = tls_encrypt_done;
5754 + caam_req->ctx = &req->base;
5755 + caam_req->edesc = edesc;
5756 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5757 + if (ret != -EINPROGRESS &&
5758 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5759 + tls_unmap(ctx->dev, edesc, req);
5760 + qi_cache_free(edesc);
5766 +static int tls_decrypt(struct aead_request *req)
5768 + struct tls_edesc *edesc;
5769 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5770 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5771 + struct caam_request *caam_req = aead_request_ctx(req);
5774 + /* allocate extended descriptor */
5775 + edesc = tls_edesc_alloc(req, false);
5776 + if (IS_ERR(edesc))
5777 + return PTR_ERR(edesc);
5779 + caam_req->flc = &ctx->flc[DECRYPT];
5780 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
5781 + caam_req->cbk = tls_decrypt_done;
5782 + caam_req->ctx = &req->base;
5783 + caam_req->edesc = edesc;
5784 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5785 + if (ret != -EINPROGRESS &&
5786 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5787 + tls_unmap(ctx->dev, edesc, req);
5788 + qi_cache_free(edesc);
5794 +static int ipsec_gcm_encrypt(struct aead_request *req)
5796 + if (req->assoclen < 8)
5799 + return aead_encrypt(req);
5802 +static int ipsec_gcm_decrypt(struct aead_request *req)
5804 + if (req->assoclen < 8)
5807 + return aead_decrypt(req);
5810 +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
5812 + struct crypto_async_request *areq = cbk_ctx;
5813 + struct skcipher_request *req = skcipher_request_cast(areq);
5814 + struct caam_request *req_ctx = to_caam_req(areq);
5815 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5816 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5817 + struct skcipher_edesc *edesc = req_ctx->edesc;
5819 + int ivsize = crypto_skcipher_ivsize(skcipher);
5822 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5825 + if (unlikely(status)) {
5826 + caam_qi2_strstatus(ctx->dev, status);
5831 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
5832 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
5833 + edesc->src_nents > 1 ? 100 : ivsize, 1);
5834 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
5835 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
5836 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
5839 + skcipher_unmap(ctx->dev, edesc, req);
5842 + * The crypto API expects us to set the IV (req->iv) to the last
5843 + * ciphertext block. This is used e.g. by the CTS mode.
5845 + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
5848 + qi_cache_free(edesc);
5849 + skcipher_request_complete(req, ecode);
5852 +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
5854 + struct crypto_async_request *areq = cbk_ctx;
5855 + struct skcipher_request *req = skcipher_request_cast(areq);
5856 + struct caam_request *req_ctx = to_caam_req(areq);
5857 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5858 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5859 + struct skcipher_edesc *edesc = req_ctx->edesc;
5862 + int ivsize = crypto_skcipher_ivsize(skcipher);
5864 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
5867 + if (unlikely(status)) {
5868 + caam_qi2_strstatus(ctx->dev, status);
5873 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
5874 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
5875 + edesc->src_nents > 1 ? 100 : ivsize, 1);
5876 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
5877 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
5878 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
5881 + skcipher_unmap(ctx->dev, edesc, req);
5882 + qi_cache_free(edesc);
5883 + skcipher_request_complete(req, ecode);
5886 +static int skcipher_encrypt(struct skcipher_request *req)
5888 + struct skcipher_edesc *edesc;
5889 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5890 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5891 + struct caam_request *caam_req = skcipher_request_ctx(req);
5894 + /* allocate extended descriptor */
5895 + edesc = skcipher_edesc_alloc(req);
5896 + if (IS_ERR(edesc))
5897 + return PTR_ERR(edesc);
5899 + caam_req->flc = &ctx->flc[ENCRYPT];
5900 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
5901 + caam_req->cbk = skcipher_encrypt_done;
5902 + caam_req->ctx = &req->base;
5903 + caam_req->edesc = edesc;
5904 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5905 + if (ret != -EINPROGRESS &&
5906 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5907 + skcipher_unmap(ctx->dev, edesc, req);
5908 + qi_cache_free(edesc);
5914 +static int skcipher_decrypt(struct skcipher_request *req)
5916 + struct skcipher_edesc *edesc;
5917 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
5918 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
5919 + struct caam_request *caam_req = skcipher_request_ctx(req);
5920 + int ivsize = crypto_skcipher_ivsize(skcipher);
5923 + /* allocate extended descriptor */
5924 + edesc = skcipher_edesc_alloc(req);
5925 + if (IS_ERR(edesc))
5926 + return PTR_ERR(edesc);
5929 + * The crypto API expects us to set the IV (req->iv) to the last
5930 + * ciphertext block.
5932 + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
5935 + caam_req->flc = &ctx->flc[DECRYPT];
5936 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
5937 + caam_req->cbk = skcipher_decrypt_done;
5938 + caam_req->ctx = &req->base;
5939 + caam_req->edesc = edesc;
5940 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
5941 + if (ret != -EINPROGRESS &&
5942 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
5943 + skcipher_unmap(ctx->dev, edesc, req);
5944 + qi_cache_free(edesc);
5950 +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
5953 + dma_addr_t dma_addr;
5956 + /* copy descriptor header template value */
5957 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
5958 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
5960 + ctx->dev = caam->dev;
5961 + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
5963 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
5964 + offsetof(struct caam_ctx, flc_dma),
5965 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
5966 + if (dma_mapping_error(ctx->dev, dma_addr)) {
5967 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
5971 + for (i = 0; i < NUM_OP; i++)
5972 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
5973 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
5978 +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
5980 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
5981 + struct caam_skcipher_alg *caam_alg =
5982 + container_of(alg, typeof(*caam_alg), skcipher);
5984 + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
5985 + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
5988 +static int caam_cra_init_aead(struct crypto_aead *tfm)
5990 + struct aead_alg *alg = crypto_aead_alg(tfm);
5991 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
5994 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
5995 + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
5996 + (alg->setkey == aead_setkey) ||
5997 + (alg->setkey == tls_setkey));
6000 +static void caam_exit_common(struct caam_ctx *ctx)
6002 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
6003 + offsetof(struct caam_ctx, flc_dma), ctx->dir,
6004 + DMA_ATTR_SKIP_CPU_SYNC);
6007 +static void caam_cra_exit(struct crypto_skcipher *tfm)
6009 + caam_exit_common(crypto_skcipher_ctx(tfm));
6012 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
6014 + caam_exit_common(crypto_aead_ctx(tfm));
6017 +static struct caam_skcipher_alg driver_algs[] = {
6021 + .cra_name = "cbc(aes)",
6022 + .cra_driver_name = "cbc-aes-caam-qi2",
6023 + .cra_blocksize = AES_BLOCK_SIZE,
6025 + .setkey = skcipher_setkey,
6026 + .encrypt = skcipher_encrypt,
6027 + .decrypt = skcipher_decrypt,
6028 + .min_keysize = AES_MIN_KEY_SIZE,
6029 + .max_keysize = AES_MAX_KEY_SIZE,
6030 + .ivsize = AES_BLOCK_SIZE,
6032 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6037 + .cra_name = "cbc(des3_ede)",
6038 + .cra_driver_name = "cbc-3des-caam-qi2",
6039 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6041 + .setkey = skcipher_setkey,
6042 + .encrypt = skcipher_encrypt,
6043 + .decrypt = skcipher_decrypt,
6044 + .min_keysize = DES3_EDE_KEY_SIZE,
6045 + .max_keysize = DES3_EDE_KEY_SIZE,
6046 + .ivsize = DES3_EDE_BLOCK_SIZE,
6048 + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6053 + .cra_name = "cbc(des)",
6054 + .cra_driver_name = "cbc-des-caam-qi2",
6055 + .cra_blocksize = DES_BLOCK_SIZE,
6057 + .setkey = skcipher_setkey,
6058 + .encrypt = skcipher_encrypt,
6059 + .decrypt = skcipher_decrypt,
6060 + .min_keysize = DES_KEY_SIZE,
6061 + .max_keysize = DES_KEY_SIZE,
6062 + .ivsize = DES_BLOCK_SIZE,
6064 + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6069 + .cra_name = "ctr(aes)",
6070 + .cra_driver_name = "ctr-aes-caam-qi2",
6071 + .cra_blocksize = 1,
6073 + .setkey = skcipher_setkey,
6074 + .encrypt = skcipher_encrypt,
6075 + .decrypt = skcipher_decrypt,
6076 + .min_keysize = AES_MIN_KEY_SIZE,
6077 + .max_keysize = AES_MAX_KEY_SIZE,
6078 + .ivsize = AES_BLOCK_SIZE,
6079 + .chunksize = AES_BLOCK_SIZE,
6081 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
6082 + OP_ALG_AAI_CTR_MOD128,
6087 + .cra_name = "rfc3686(ctr(aes))",
6088 + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
6089 + .cra_blocksize = 1,
6091 + .setkey = skcipher_setkey,
6092 + .encrypt = skcipher_encrypt,
6093 + .decrypt = skcipher_decrypt,
6094 + .min_keysize = AES_MIN_KEY_SIZE +
6095 + CTR_RFC3686_NONCE_SIZE,
6096 + .max_keysize = AES_MAX_KEY_SIZE +
6097 + CTR_RFC3686_NONCE_SIZE,
6098 + .ivsize = CTR_RFC3686_IV_SIZE,
6099 + .chunksize = AES_BLOCK_SIZE,
6102 + .class1_alg_type = OP_ALG_ALGSEL_AES |
6103 + OP_ALG_AAI_CTR_MOD128,
6110 + .cra_name = "xts(aes)",
6111 + .cra_driver_name = "xts-aes-caam-qi2",
6112 + .cra_blocksize = AES_BLOCK_SIZE,
6114 + .setkey = xts_skcipher_setkey,
6115 + .encrypt = skcipher_encrypt,
6116 + .decrypt = skcipher_decrypt,
6117 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
6118 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
6119 + .ivsize = AES_BLOCK_SIZE,
6121 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
6125 +static struct caam_aead_alg driver_aeads[] = {
6129 + .cra_name = "rfc4106(gcm(aes))",
6130 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
6131 + .cra_blocksize = 1,
6133 + .setkey = rfc4106_setkey,
6134 + .setauthsize = rfc4106_setauthsize,
6135 + .encrypt = ipsec_gcm_encrypt,
6136 + .decrypt = ipsec_gcm_decrypt,
6138 + .maxauthsize = AES_BLOCK_SIZE,
6141 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
6147 + .cra_name = "rfc4543(gcm(aes))",
6148 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
6149 + .cra_blocksize = 1,
6151 + .setkey = rfc4543_setkey,
6152 + .setauthsize = rfc4543_setauthsize,
6153 + .encrypt = ipsec_gcm_encrypt,
6154 + .decrypt = ipsec_gcm_decrypt,
6156 + .maxauthsize = AES_BLOCK_SIZE,
6159 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
6162 + /* Galois Counter Mode */
6166 + .cra_name = "gcm(aes)",
6167 + .cra_driver_name = "gcm-aes-caam-qi2",
6168 + .cra_blocksize = 1,
6170 + .setkey = gcm_setkey,
6171 + .setauthsize = gcm_setauthsize,
6172 + .encrypt = aead_encrypt,
6173 + .decrypt = aead_decrypt,
6175 + .maxauthsize = AES_BLOCK_SIZE,
6178 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
6181 + /* single-pass ipsec_esp descriptor */
6185 + .cra_name = "authenc(hmac(md5),cbc(aes))",
6186 + .cra_driver_name = "authenc-hmac-md5-"
6187 + "cbc-aes-caam-qi2",
6188 + .cra_blocksize = AES_BLOCK_SIZE,
6190 + .setkey = aead_setkey,
6191 + .setauthsize = aead_setauthsize,
6192 + .encrypt = aead_encrypt,
6193 + .decrypt = aead_decrypt,
6194 + .ivsize = AES_BLOCK_SIZE,
6195 + .maxauthsize = MD5_DIGEST_SIZE,
6198 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6199 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6200 + OP_ALG_AAI_HMAC_PRECOMP,
6206 + .cra_name = "echainiv(authenc(hmac(md5),"
6208 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
6209 + "cbc-aes-caam-qi2",
6210 + .cra_blocksize = AES_BLOCK_SIZE,
6212 + .setkey = aead_setkey,
6213 + .setauthsize = aead_setauthsize,
6214 + .encrypt = aead_encrypt,
6215 + .decrypt = aead_decrypt,
6216 + .ivsize = AES_BLOCK_SIZE,
6217 + .maxauthsize = MD5_DIGEST_SIZE,
6220 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6221 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6222 + OP_ALG_AAI_HMAC_PRECOMP,
6229 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
6230 + .cra_driver_name = "authenc-hmac-sha1-"
6231 + "cbc-aes-caam-qi2",
6232 + .cra_blocksize = AES_BLOCK_SIZE,
6234 + .setkey = aead_setkey,
6235 + .setauthsize = aead_setauthsize,
6236 + .encrypt = aead_encrypt,
6237 + .decrypt = aead_decrypt,
6238 + .ivsize = AES_BLOCK_SIZE,
6239 + .maxauthsize = SHA1_DIGEST_SIZE,
6242 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6243 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6244 + OP_ALG_AAI_HMAC_PRECOMP,
6250 + .cra_name = "echainiv(authenc(hmac(sha1),"
6252 + .cra_driver_name = "echainiv-authenc-"
6253 + "hmac-sha1-cbc-aes-caam-qi2",
6254 + .cra_blocksize = AES_BLOCK_SIZE,
6256 + .setkey = aead_setkey,
6257 + .setauthsize = aead_setauthsize,
6258 + .encrypt = aead_encrypt,
6259 + .decrypt = aead_decrypt,
6260 + .ivsize = AES_BLOCK_SIZE,
6261 + .maxauthsize = SHA1_DIGEST_SIZE,
6264 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6265 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6266 + OP_ALG_AAI_HMAC_PRECOMP,
6273 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
6274 + .cra_driver_name = "authenc-hmac-sha224-"
6275 + "cbc-aes-caam-qi2",
6276 + .cra_blocksize = AES_BLOCK_SIZE,
6278 + .setkey = aead_setkey,
6279 + .setauthsize = aead_setauthsize,
6280 + .encrypt = aead_encrypt,
6281 + .decrypt = aead_decrypt,
6282 + .ivsize = AES_BLOCK_SIZE,
6283 + .maxauthsize = SHA224_DIGEST_SIZE,
6286 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6287 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6288 + OP_ALG_AAI_HMAC_PRECOMP,
6294 + .cra_name = "echainiv(authenc(hmac(sha224),"
6296 + .cra_driver_name = "echainiv-authenc-"
6297 + "hmac-sha224-cbc-aes-caam-qi2",
6298 + .cra_blocksize = AES_BLOCK_SIZE,
6300 + .setkey = aead_setkey,
6301 + .setauthsize = aead_setauthsize,
6302 + .encrypt = aead_encrypt,
6303 + .decrypt = aead_decrypt,
6304 + .ivsize = AES_BLOCK_SIZE,
6305 + .maxauthsize = SHA224_DIGEST_SIZE,
6308 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6309 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6310 + OP_ALG_AAI_HMAC_PRECOMP,
6317 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
6318 + .cra_driver_name = "authenc-hmac-sha256-"
6319 + "cbc-aes-caam-qi2",
6320 + .cra_blocksize = AES_BLOCK_SIZE,
6322 + .setkey = aead_setkey,
6323 + .setauthsize = aead_setauthsize,
6324 + .encrypt = aead_encrypt,
6325 + .decrypt = aead_decrypt,
6326 + .ivsize = AES_BLOCK_SIZE,
6327 + .maxauthsize = SHA256_DIGEST_SIZE,
6330 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6331 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6332 + OP_ALG_AAI_HMAC_PRECOMP,
6338 + .cra_name = "echainiv(authenc(hmac(sha256),"
6340 + .cra_driver_name = "echainiv-authenc-"
6341 + "hmac-sha256-cbc-aes-"
6343 + .cra_blocksize = AES_BLOCK_SIZE,
6345 + .setkey = aead_setkey,
6346 + .setauthsize = aead_setauthsize,
6347 + .encrypt = aead_encrypt,
6348 + .decrypt = aead_decrypt,
6349 + .ivsize = AES_BLOCK_SIZE,
6350 + .maxauthsize = SHA256_DIGEST_SIZE,
6353 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6354 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6355 + OP_ALG_AAI_HMAC_PRECOMP,
6362 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
6363 + .cra_driver_name = "authenc-hmac-sha384-"
6364 + "cbc-aes-caam-qi2",
6365 + .cra_blocksize = AES_BLOCK_SIZE,
6367 + .setkey = aead_setkey,
6368 + .setauthsize = aead_setauthsize,
6369 + .encrypt = aead_encrypt,
6370 + .decrypt = aead_decrypt,
6371 + .ivsize = AES_BLOCK_SIZE,
6372 + .maxauthsize = SHA384_DIGEST_SIZE,
6375 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6376 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6377 + OP_ALG_AAI_HMAC_PRECOMP,
6383 + .cra_name = "echainiv(authenc(hmac(sha384),"
6385 + .cra_driver_name = "echainiv-authenc-"
6386 + "hmac-sha384-cbc-aes-"
6388 + .cra_blocksize = AES_BLOCK_SIZE,
6390 + .setkey = aead_setkey,
6391 + .setauthsize = aead_setauthsize,
6392 + .encrypt = aead_encrypt,
6393 + .decrypt = aead_decrypt,
6394 + .ivsize = AES_BLOCK_SIZE,
6395 + .maxauthsize = SHA384_DIGEST_SIZE,
6398 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6399 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6400 + OP_ALG_AAI_HMAC_PRECOMP,
6407 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
6408 + .cra_driver_name = "authenc-hmac-sha512-"
6409 + "cbc-aes-caam-qi2",
6410 + .cra_blocksize = AES_BLOCK_SIZE,
6412 + .setkey = aead_setkey,
6413 + .setauthsize = aead_setauthsize,
6414 + .encrypt = aead_encrypt,
6415 + .decrypt = aead_decrypt,
6416 + .ivsize = AES_BLOCK_SIZE,
6417 + .maxauthsize = SHA512_DIGEST_SIZE,
6420 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6421 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6422 + OP_ALG_AAI_HMAC_PRECOMP,
6428 + .cra_name = "echainiv(authenc(hmac(sha512),"
6430 + .cra_driver_name = "echainiv-authenc-"
6431 + "hmac-sha512-cbc-aes-"
6433 + .cra_blocksize = AES_BLOCK_SIZE,
6435 + .setkey = aead_setkey,
6436 + .setauthsize = aead_setauthsize,
6437 + .encrypt = aead_encrypt,
6438 + .decrypt = aead_decrypt,
6439 + .ivsize = AES_BLOCK_SIZE,
6440 + .maxauthsize = SHA512_DIGEST_SIZE,
6443 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6444 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6445 + OP_ALG_AAI_HMAC_PRECOMP,
6452 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
6453 + .cra_driver_name = "authenc-hmac-md5-"
6454 + "cbc-des3_ede-caam-qi2",
6455 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6457 + .setkey = aead_setkey,
6458 + .setauthsize = aead_setauthsize,
6459 + .encrypt = aead_encrypt,
6460 + .decrypt = aead_decrypt,
6461 + .ivsize = DES3_EDE_BLOCK_SIZE,
6462 + .maxauthsize = MD5_DIGEST_SIZE,
6465 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6466 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6467 + OP_ALG_AAI_HMAC_PRECOMP,
6473 + .cra_name = "echainiv(authenc(hmac(md5),"
6474 + "cbc(des3_ede)))",
6475 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
6476 + "cbc-des3_ede-caam-qi2",
6477 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6479 + .setkey = aead_setkey,
6480 + .setauthsize = aead_setauthsize,
6481 + .encrypt = aead_encrypt,
6482 + .decrypt = aead_decrypt,
6483 + .ivsize = DES3_EDE_BLOCK_SIZE,
6484 + .maxauthsize = MD5_DIGEST_SIZE,
6487 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6488 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6489 + OP_ALG_AAI_HMAC_PRECOMP,
6496 + .cra_name = "authenc(hmac(sha1),"
6498 + .cra_driver_name = "authenc-hmac-sha1-"
6499 + "cbc-des3_ede-caam-qi2",
6500 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6502 + .setkey = aead_setkey,
6503 + .setauthsize = aead_setauthsize,
6504 + .encrypt = aead_encrypt,
6505 + .decrypt = aead_decrypt,
6506 + .ivsize = DES3_EDE_BLOCK_SIZE,
6507 + .maxauthsize = SHA1_DIGEST_SIZE,
6510 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6511 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6512 + OP_ALG_AAI_HMAC_PRECOMP,
6518 + .cra_name = "echainiv(authenc(hmac(sha1),"
6519 + "cbc(des3_ede)))",
6520 + .cra_driver_name = "echainiv-authenc-"
6522 + "cbc-des3_ede-caam-qi2",
6523 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6525 + .setkey = aead_setkey,
6526 + .setauthsize = aead_setauthsize,
6527 + .encrypt = aead_encrypt,
6528 + .decrypt = aead_decrypt,
6529 + .ivsize = DES3_EDE_BLOCK_SIZE,
6530 + .maxauthsize = SHA1_DIGEST_SIZE,
6533 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6534 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6535 + OP_ALG_AAI_HMAC_PRECOMP,
6542 + .cra_name = "authenc(hmac(sha224),"
6544 + .cra_driver_name = "authenc-hmac-sha224-"
6545 + "cbc-des3_ede-caam-qi2",
6546 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6548 + .setkey = aead_setkey,
6549 + .setauthsize = aead_setauthsize,
6550 + .encrypt = aead_encrypt,
6551 + .decrypt = aead_decrypt,
6552 + .ivsize = DES3_EDE_BLOCK_SIZE,
6553 + .maxauthsize = SHA224_DIGEST_SIZE,
6556 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6557 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6558 + OP_ALG_AAI_HMAC_PRECOMP,
6564 + .cra_name = "echainiv(authenc(hmac(sha224),"
6565 + "cbc(des3_ede)))",
6566 + .cra_driver_name = "echainiv-authenc-"
6568 + "cbc-des3_ede-caam-qi2",
6569 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6571 + .setkey = aead_setkey,
6572 + .setauthsize = aead_setauthsize,
6573 + .encrypt = aead_encrypt,
6574 + .decrypt = aead_decrypt,
6575 + .ivsize = DES3_EDE_BLOCK_SIZE,
6576 + .maxauthsize = SHA224_DIGEST_SIZE,
6579 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6580 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6581 + OP_ALG_AAI_HMAC_PRECOMP,
6588 + .cra_name = "authenc(hmac(sha256),"
6590 + .cra_driver_name = "authenc-hmac-sha256-"
6591 + "cbc-des3_ede-caam-qi2",
6592 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6594 + .setkey = aead_setkey,
6595 + .setauthsize = aead_setauthsize,
6596 + .encrypt = aead_encrypt,
6597 + .decrypt = aead_decrypt,
6598 + .ivsize = DES3_EDE_BLOCK_SIZE,
6599 + .maxauthsize = SHA256_DIGEST_SIZE,
6602 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6603 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6604 + OP_ALG_AAI_HMAC_PRECOMP,
6610 + .cra_name = "echainiv(authenc(hmac(sha256),"
6611 + "cbc(des3_ede)))",
6612 + .cra_driver_name = "echainiv-authenc-"
6614 + "cbc-des3_ede-caam-qi2",
6615 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6617 + .setkey = aead_setkey,
6618 + .setauthsize = aead_setauthsize,
6619 + .encrypt = aead_encrypt,
6620 + .decrypt = aead_decrypt,
6621 + .ivsize = DES3_EDE_BLOCK_SIZE,
6622 + .maxauthsize = SHA256_DIGEST_SIZE,
6625 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6626 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6627 + OP_ALG_AAI_HMAC_PRECOMP,
6634 + .cra_name = "authenc(hmac(sha384),"
6636 + .cra_driver_name = "authenc-hmac-sha384-"
6637 + "cbc-des3_ede-caam-qi2",
6638 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6640 + .setkey = aead_setkey,
6641 + .setauthsize = aead_setauthsize,
6642 + .encrypt = aead_encrypt,
6643 + .decrypt = aead_decrypt,
6644 + .ivsize = DES3_EDE_BLOCK_SIZE,
6645 + .maxauthsize = SHA384_DIGEST_SIZE,
6648 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6649 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6650 + OP_ALG_AAI_HMAC_PRECOMP,
6656 + .cra_name = "echainiv(authenc(hmac(sha384),"
6657 + "cbc(des3_ede)))",
6658 + .cra_driver_name = "echainiv-authenc-"
6660 + "cbc-des3_ede-caam-qi2",
6661 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6663 + .setkey = aead_setkey,
6664 + .setauthsize = aead_setauthsize,
6665 + .encrypt = aead_encrypt,
6666 + .decrypt = aead_decrypt,
6667 + .ivsize = DES3_EDE_BLOCK_SIZE,
6668 + .maxauthsize = SHA384_DIGEST_SIZE,
6671 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6672 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6673 + OP_ALG_AAI_HMAC_PRECOMP,
6680 + .cra_name = "authenc(hmac(sha512),"
6682 + .cra_driver_name = "authenc-hmac-sha512-"
6683 + "cbc-des3_ede-caam-qi2",
6684 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6686 + .setkey = aead_setkey,
6687 + .setauthsize = aead_setauthsize,
6688 + .encrypt = aead_encrypt,
6689 + .decrypt = aead_decrypt,
6690 + .ivsize = DES3_EDE_BLOCK_SIZE,
6691 + .maxauthsize = SHA512_DIGEST_SIZE,
6694 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6695 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6696 + OP_ALG_AAI_HMAC_PRECOMP,
6702 + .cra_name = "echainiv(authenc(hmac(sha512),"
6703 + "cbc(des3_ede)))",
6704 + .cra_driver_name = "echainiv-authenc-"
6706 + "cbc-des3_ede-caam-qi2",
6707 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6709 + .setkey = aead_setkey,
6710 + .setauthsize = aead_setauthsize,
6711 + .encrypt = aead_encrypt,
6712 + .decrypt = aead_decrypt,
6713 + .ivsize = DES3_EDE_BLOCK_SIZE,
6714 + .maxauthsize = SHA512_DIGEST_SIZE,
6717 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6718 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6719 + OP_ALG_AAI_HMAC_PRECOMP,
6726 + .cra_name = "authenc(hmac(md5),cbc(des))",
6727 + .cra_driver_name = "authenc-hmac-md5-"
6728 + "cbc-des-caam-qi2",
6729 + .cra_blocksize = DES_BLOCK_SIZE,
6731 + .setkey = aead_setkey,
6732 + .setauthsize = aead_setauthsize,
6733 + .encrypt = aead_encrypt,
6734 + .decrypt = aead_decrypt,
6735 + .ivsize = DES_BLOCK_SIZE,
6736 + .maxauthsize = MD5_DIGEST_SIZE,
6739 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6740 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6741 + OP_ALG_AAI_HMAC_PRECOMP,
6747 + .cra_name = "echainiv(authenc(hmac(md5),"
6749 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
6750 + "cbc-des-caam-qi2",
6751 + .cra_blocksize = DES_BLOCK_SIZE,
6753 + .setkey = aead_setkey,
6754 + .setauthsize = aead_setauthsize,
6755 + .encrypt = aead_encrypt,
6756 + .decrypt = aead_decrypt,
6757 + .ivsize = DES_BLOCK_SIZE,
6758 + .maxauthsize = MD5_DIGEST_SIZE,
6761 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6762 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
6763 + OP_ALG_AAI_HMAC_PRECOMP,
6770 + .cra_name = "authenc(hmac(sha1),cbc(des))",
6771 + .cra_driver_name = "authenc-hmac-sha1-"
6772 + "cbc-des-caam-qi2",
6773 + .cra_blocksize = DES_BLOCK_SIZE,
6775 + .setkey = aead_setkey,
6776 + .setauthsize = aead_setauthsize,
6777 + .encrypt = aead_encrypt,
6778 + .decrypt = aead_decrypt,
6779 + .ivsize = DES_BLOCK_SIZE,
6780 + .maxauthsize = SHA1_DIGEST_SIZE,
6783 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6784 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6785 + OP_ALG_AAI_HMAC_PRECOMP,
6791 + .cra_name = "echainiv(authenc(hmac(sha1),"
6793 + .cra_driver_name = "echainiv-authenc-"
6794 + "hmac-sha1-cbc-des-caam-qi2",
6795 + .cra_blocksize = DES_BLOCK_SIZE,
6797 + .setkey = aead_setkey,
6798 + .setauthsize = aead_setauthsize,
6799 + .encrypt = aead_encrypt,
6800 + .decrypt = aead_decrypt,
6801 + .ivsize = DES_BLOCK_SIZE,
6802 + .maxauthsize = SHA1_DIGEST_SIZE,
6805 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6806 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
6807 + OP_ALG_AAI_HMAC_PRECOMP,
6814 + .cra_name = "authenc(hmac(sha224),cbc(des))",
6815 + .cra_driver_name = "authenc-hmac-sha224-"
6816 + "cbc-des-caam-qi2",
6817 + .cra_blocksize = DES_BLOCK_SIZE,
6819 + .setkey = aead_setkey,
6820 + .setauthsize = aead_setauthsize,
6821 + .encrypt = aead_encrypt,
6822 + .decrypt = aead_decrypt,
6823 + .ivsize = DES_BLOCK_SIZE,
6824 + .maxauthsize = SHA224_DIGEST_SIZE,
6827 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6828 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6829 + OP_ALG_AAI_HMAC_PRECOMP,
6835 + .cra_name = "echainiv(authenc(hmac(sha224),"
6837 + .cra_driver_name = "echainiv-authenc-"
6838 + "hmac-sha224-cbc-des-"
6840 + .cra_blocksize = DES_BLOCK_SIZE,
6842 + .setkey = aead_setkey,
6843 + .setauthsize = aead_setauthsize,
6844 + .encrypt = aead_encrypt,
6845 + .decrypt = aead_decrypt,
6846 + .ivsize = DES_BLOCK_SIZE,
6847 + .maxauthsize = SHA224_DIGEST_SIZE,
6850 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6851 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
6852 + OP_ALG_AAI_HMAC_PRECOMP,
6859 + .cra_name = "authenc(hmac(sha256),cbc(des))",
6860 + .cra_driver_name = "authenc-hmac-sha256-"
6861 + "cbc-des-caam-qi2",
6862 + .cra_blocksize = DES_BLOCK_SIZE,
6864 + .setkey = aead_setkey,
6865 + .setauthsize = aead_setauthsize,
6866 + .encrypt = aead_encrypt,
6867 + .decrypt = aead_decrypt,
6868 + .ivsize = DES_BLOCK_SIZE,
6869 + .maxauthsize = SHA256_DIGEST_SIZE,
6872 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6873 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6874 + OP_ALG_AAI_HMAC_PRECOMP,
6880 + .cra_name = "echainiv(authenc(hmac(sha256),"
6882 + .cra_driver_name = "echainiv-authenc-"
6883 +				"hmac-sha256-cbc-des-"
6885 + .cra_blocksize = DES_BLOCK_SIZE,
6887 + .setkey = aead_setkey,
6888 + .setauthsize = aead_setauthsize,
6889 + .encrypt = aead_encrypt,
6890 + .decrypt = aead_decrypt,
6891 + .ivsize = DES_BLOCK_SIZE,
6892 + .maxauthsize = SHA256_DIGEST_SIZE,
6895 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6896 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
6897 + OP_ALG_AAI_HMAC_PRECOMP,
6904 + .cra_name = "authenc(hmac(sha384),cbc(des))",
6905 + .cra_driver_name = "authenc-hmac-sha384-"
6906 + "cbc-des-caam-qi2",
6907 + .cra_blocksize = DES_BLOCK_SIZE,
6909 + .setkey = aead_setkey,
6910 + .setauthsize = aead_setauthsize,
6911 + .encrypt = aead_encrypt,
6912 + .decrypt = aead_decrypt,
6913 + .ivsize = DES_BLOCK_SIZE,
6914 + .maxauthsize = SHA384_DIGEST_SIZE,
6917 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6918 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6919 + OP_ALG_AAI_HMAC_PRECOMP,
6925 + .cra_name = "echainiv(authenc(hmac(sha384),"
6927 + .cra_driver_name = "echainiv-authenc-"
6928 + "hmac-sha384-cbc-des-"
6930 + .cra_blocksize = DES_BLOCK_SIZE,
6932 + .setkey = aead_setkey,
6933 + .setauthsize = aead_setauthsize,
6934 + .encrypt = aead_encrypt,
6935 + .decrypt = aead_decrypt,
6936 + .ivsize = DES_BLOCK_SIZE,
6937 + .maxauthsize = SHA384_DIGEST_SIZE,
6940 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6941 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
6942 + OP_ALG_AAI_HMAC_PRECOMP,
6949 + .cra_name = "authenc(hmac(sha512),cbc(des))",
6950 + .cra_driver_name = "authenc-hmac-sha512-"
6951 + "cbc-des-caam-qi2",
6952 + .cra_blocksize = DES_BLOCK_SIZE,
6954 + .setkey = aead_setkey,
6955 + .setauthsize = aead_setauthsize,
6956 + .encrypt = aead_encrypt,
6957 + .decrypt = aead_decrypt,
6958 + .ivsize = DES_BLOCK_SIZE,
6959 + .maxauthsize = SHA512_DIGEST_SIZE,
6962 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6963 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6964 + OP_ALG_AAI_HMAC_PRECOMP,
6970 + .cra_name = "echainiv(authenc(hmac(sha512),"
6972 + .cra_driver_name = "echainiv-authenc-"
6973 + "hmac-sha512-cbc-des-"
6975 + .cra_blocksize = DES_BLOCK_SIZE,
6977 + .setkey = aead_setkey,
6978 + .setauthsize = aead_setauthsize,
6979 + .encrypt = aead_encrypt,
6980 + .decrypt = aead_decrypt,
6981 + .ivsize = DES_BLOCK_SIZE,
6982 + .maxauthsize = SHA512_DIGEST_SIZE,
6985 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
6986 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
6987 + OP_ALG_AAI_HMAC_PRECOMP,
6994 + .cra_name = "authenc(hmac(md5),"
6995 + "rfc3686(ctr(aes)))",
6996 + .cra_driver_name = "authenc-hmac-md5-"
6997 + "rfc3686-ctr-aes-caam-qi2",
6998 + .cra_blocksize = 1,
7000 + .setkey = aead_setkey,
7001 + .setauthsize = aead_setauthsize,
7002 + .encrypt = aead_encrypt,
7003 + .decrypt = aead_decrypt,
7004 + .ivsize = CTR_RFC3686_IV_SIZE,
7005 + .maxauthsize = MD5_DIGEST_SIZE,
7008 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7009 + OP_ALG_AAI_CTR_MOD128,
7010 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7011 + OP_ALG_AAI_HMAC_PRECOMP,
7018 + .cra_name = "seqiv(authenc("
7019 + "hmac(md5),rfc3686(ctr(aes))))",
7020 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
7021 + "rfc3686-ctr-aes-caam-qi2",
7022 + .cra_blocksize = 1,
7024 + .setkey = aead_setkey,
7025 + .setauthsize = aead_setauthsize,
7026 + .encrypt = aead_encrypt,
7027 + .decrypt = aead_decrypt,
7028 + .ivsize = CTR_RFC3686_IV_SIZE,
7029 + .maxauthsize = MD5_DIGEST_SIZE,
7032 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7033 + OP_ALG_AAI_CTR_MOD128,
7034 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7035 + OP_ALG_AAI_HMAC_PRECOMP,
7043 + .cra_name = "authenc(hmac(sha1),"
7044 + "rfc3686(ctr(aes)))",
7045 + .cra_driver_name = "authenc-hmac-sha1-"
7046 + "rfc3686-ctr-aes-caam-qi2",
7047 + .cra_blocksize = 1,
7049 + .setkey = aead_setkey,
7050 + .setauthsize = aead_setauthsize,
7051 + .encrypt = aead_encrypt,
7052 + .decrypt = aead_decrypt,
7053 + .ivsize = CTR_RFC3686_IV_SIZE,
7054 + .maxauthsize = SHA1_DIGEST_SIZE,
7057 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7058 + OP_ALG_AAI_CTR_MOD128,
7059 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7060 + OP_ALG_AAI_HMAC_PRECOMP,
7067 + .cra_name = "seqiv(authenc("
7068 + "hmac(sha1),rfc3686(ctr(aes))))",
7069 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
7070 + "rfc3686-ctr-aes-caam-qi2",
7071 + .cra_blocksize = 1,
7073 + .setkey = aead_setkey,
7074 + .setauthsize = aead_setauthsize,
7075 + .encrypt = aead_encrypt,
7076 + .decrypt = aead_decrypt,
7077 + .ivsize = CTR_RFC3686_IV_SIZE,
7078 + .maxauthsize = SHA1_DIGEST_SIZE,
7081 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7082 + OP_ALG_AAI_CTR_MOD128,
7083 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7084 + OP_ALG_AAI_HMAC_PRECOMP,
7092 + .cra_name = "authenc(hmac(sha224),"
7093 + "rfc3686(ctr(aes)))",
7094 + .cra_driver_name = "authenc-hmac-sha224-"
7095 + "rfc3686-ctr-aes-caam-qi2",
7096 + .cra_blocksize = 1,
7098 + .setkey = aead_setkey,
7099 + .setauthsize = aead_setauthsize,
7100 + .encrypt = aead_encrypt,
7101 + .decrypt = aead_decrypt,
7102 + .ivsize = CTR_RFC3686_IV_SIZE,
7103 + .maxauthsize = SHA224_DIGEST_SIZE,
7106 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7107 + OP_ALG_AAI_CTR_MOD128,
7108 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7109 + OP_ALG_AAI_HMAC_PRECOMP,
7116 + .cra_name = "seqiv(authenc("
7117 + "hmac(sha224),rfc3686(ctr(aes))))",
7118 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
7119 + "rfc3686-ctr-aes-caam-qi2",
7120 + .cra_blocksize = 1,
7122 + .setkey = aead_setkey,
7123 + .setauthsize = aead_setauthsize,
7124 + .encrypt = aead_encrypt,
7125 + .decrypt = aead_decrypt,
7126 + .ivsize = CTR_RFC3686_IV_SIZE,
7127 + .maxauthsize = SHA224_DIGEST_SIZE,
7130 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7131 + OP_ALG_AAI_CTR_MOD128,
7132 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7133 + OP_ALG_AAI_HMAC_PRECOMP,
7141 + .cra_name = "authenc(hmac(sha256),"
7142 + "rfc3686(ctr(aes)))",
7143 + .cra_driver_name = "authenc-hmac-sha256-"
7144 + "rfc3686-ctr-aes-caam-qi2",
7145 + .cra_blocksize = 1,
7147 + .setkey = aead_setkey,
7148 + .setauthsize = aead_setauthsize,
7149 + .encrypt = aead_encrypt,
7150 + .decrypt = aead_decrypt,
7151 + .ivsize = CTR_RFC3686_IV_SIZE,
7152 + .maxauthsize = SHA256_DIGEST_SIZE,
7155 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7156 + OP_ALG_AAI_CTR_MOD128,
7157 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7158 + OP_ALG_AAI_HMAC_PRECOMP,
7165 + .cra_name = "seqiv(authenc(hmac(sha256),"
7166 + "rfc3686(ctr(aes))))",
7167 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
7168 + "rfc3686-ctr-aes-caam-qi2",
7169 + .cra_blocksize = 1,
7171 + .setkey = aead_setkey,
7172 + .setauthsize = aead_setauthsize,
7173 + .encrypt = aead_encrypt,
7174 + .decrypt = aead_decrypt,
7175 + .ivsize = CTR_RFC3686_IV_SIZE,
7176 + .maxauthsize = SHA256_DIGEST_SIZE,
7179 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7180 + OP_ALG_AAI_CTR_MOD128,
7181 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7182 + OP_ALG_AAI_HMAC_PRECOMP,
7190 + .cra_name = "authenc(hmac(sha384),"
7191 + "rfc3686(ctr(aes)))",
7192 + .cra_driver_name = "authenc-hmac-sha384-"
7193 + "rfc3686-ctr-aes-caam-qi2",
7194 + .cra_blocksize = 1,
7196 + .setkey = aead_setkey,
7197 + .setauthsize = aead_setauthsize,
7198 + .encrypt = aead_encrypt,
7199 + .decrypt = aead_decrypt,
7200 + .ivsize = CTR_RFC3686_IV_SIZE,
7201 + .maxauthsize = SHA384_DIGEST_SIZE,
7204 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7205 + OP_ALG_AAI_CTR_MOD128,
7206 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7207 + OP_ALG_AAI_HMAC_PRECOMP,
7214 + .cra_name = "seqiv(authenc(hmac(sha384),"
7215 + "rfc3686(ctr(aes))))",
7216 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
7217 + "rfc3686-ctr-aes-caam-qi2",
7218 + .cra_blocksize = 1,
7220 + .setkey = aead_setkey,
7221 + .setauthsize = aead_setauthsize,
7222 + .encrypt = aead_encrypt,
7223 + .decrypt = aead_decrypt,
7224 + .ivsize = CTR_RFC3686_IV_SIZE,
7225 + .maxauthsize = SHA384_DIGEST_SIZE,
7228 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7229 + OP_ALG_AAI_CTR_MOD128,
7230 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7231 + OP_ALG_AAI_HMAC_PRECOMP,
7239 + .cra_name = "authenc(hmac(sha512),"
7240 + "rfc3686(ctr(aes)))",
7241 + .cra_driver_name = "authenc-hmac-sha512-"
7242 + "rfc3686-ctr-aes-caam-qi2",
7243 + .cra_blocksize = 1,
7245 + .setkey = aead_setkey,
7246 + .setauthsize = aead_setauthsize,
7247 + .encrypt = aead_encrypt,
7248 + .decrypt = aead_decrypt,
7249 + .ivsize = CTR_RFC3686_IV_SIZE,
7250 + .maxauthsize = SHA512_DIGEST_SIZE,
7253 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7254 + OP_ALG_AAI_CTR_MOD128,
7255 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7256 + OP_ALG_AAI_HMAC_PRECOMP,
7263 + .cra_name = "seqiv(authenc(hmac(sha512),"
7264 + "rfc3686(ctr(aes))))",
7265 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
7266 + "rfc3686-ctr-aes-caam-qi2",
7267 + .cra_blocksize = 1,
7269 + .setkey = aead_setkey,
7270 + .setauthsize = aead_setauthsize,
7271 + .encrypt = aead_encrypt,
7272 + .decrypt = aead_decrypt,
7273 + .ivsize = CTR_RFC3686_IV_SIZE,
7274 + .maxauthsize = SHA512_DIGEST_SIZE,
7277 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7278 + OP_ALG_AAI_CTR_MOD128,
7279 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7280 + OP_ALG_AAI_HMAC_PRECOMP,
7288 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
7289 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
7290 + .cra_blocksize = AES_BLOCK_SIZE,
7292 + .setkey = tls_setkey,
7293 + .setauthsize = tls_setauthsize,
7294 + .encrypt = tls_encrypt,
7295 + .decrypt = tls_decrypt,
7296 + .ivsize = AES_BLOCK_SIZE,
7297 + .maxauthsize = SHA1_DIGEST_SIZE,
7300 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7301 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7302 + OP_ALG_AAI_HMAC_PRECOMP,
7307 +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
7309 + struct skcipher_alg *alg = &t_alg->skcipher;
7311 + alg->base.cra_module = THIS_MODULE;
7312 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
7313 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
7314 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
7316 + alg->init = caam_cra_init_skcipher;
7317 + alg->exit = caam_cra_exit;
7320 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
7322 + struct aead_alg *alg = &t_alg->aead;
7324 + alg->base.cra_module = THIS_MODULE;
7325 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
7326 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
7327 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
7329 + alg->init = caam_cra_init_aead;
7330 + alg->exit = caam_cra_exit_aead;
7333 +/* max hash key is max split key size */
7334 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
7336 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
7337 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
7339 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
7340 + CAAM_MAX_HASH_KEY_SIZE)
7341 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
7343 +/* caam context sizes for hashes: running digest + 8 */
7344 +#define HASH_MSG_LEN 8
7345 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
7356 + * caam_hash_ctx - ahash per-session context
7357 + * @flc: Flow Contexts array
7358 + * @flc_dma: I/O virtual addresses of the Flow Contexts
7359 + * @key: virtual address of the authentication key
7360 + * @dev: dpseci device
7361 + * @ctx_len: size of Context Register
7362 + * @adata: hashing algorithm details
7364 +struct caam_hash_ctx {
7365 + struct caam_flc flc[HASH_NUM_OP];
7366 + dma_addr_t flc_dma[HASH_NUM_OP];
7367 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
7368 + struct device *dev;
7370 + struct alginfo adata;
7374 +struct caam_hash_state {
7375 + struct caam_request caam_req;
7376 + dma_addr_t buf_dma;
7377 + dma_addr_t ctx_dma;
7378 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
7380 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
7382 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
7383 + int (*update)(struct ahash_request *req);
7384 + int (*final)(struct ahash_request *req);
7385 + int (*finup)(struct ahash_request *req);
7389 +struct caam_export_state {
7390 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
7391 + u8 caam_ctx[MAX_CTX_LEN];
7393 + int (*update)(struct ahash_request *req);
7394 + int (*final)(struct ahash_request *req);
7395 + int (*finup)(struct ahash_request *req);
7398 +static inline void switch_buf(struct caam_hash_state *state)
7400 + state->current_buf ^= 1;
7403 +static inline u8 *current_buf(struct caam_hash_state *state)
7405 + return state->current_buf ? state->buf_1 : state->buf_0;
7408 +static inline u8 *alt_buf(struct caam_hash_state *state)
7410 + return state->current_buf ? state->buf_0 : state->buf_1;
7413 +static inline int *current_buflen(struct caam_hash_state *state)
7415 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
7418 +static inline int *alt_buflen(struct caam_hash_state *state)
7420 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
7423 +/* Map current buffer in state (if length > 0) and put it in link table */
7424 +static inline int buf_map_to_qm_sg(struct device *dev,
7425 + struct dpaa2_sg_entry *qm_sg,
7426 + struct caam_hash_state *state)
7428 + int buflen = *current_buflen(state);
7433 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
7435 + if (dma_mapping_error(dev, state->buf_dma)) {
7436 + dev_err(dev, "unable to map buf\n");
7437 + state->buf_dma = 0;
7441 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
7446 +/* Map state->caam_ctx, and add it to link table */
7447 +static inline int ctx_map_to_qm_sg(struct device *dev,
7448 + struct caam_hash_state *state, int ctx_len,
7449 + struct dpaa2_sg_entry *qm_sg, u32 flag)
7451 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
7452 + if (dma_mapping_error(dev, state->ctx_dma)) {
7453 + dev_err(dev, "unable to map ctx\n");
7454 + state->ctx_dma = 0;
7458 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
7463 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
7465 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7466 + int digestsize = crypto_ahash_digestsize(ahash);
7467 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
7468 + struct caam_flc *flc;
7471 + ctx->adata.key_virt = ctx->key;
7472 + ctx->adata.key_inline = true;
7474 + /* ahash_update shared descriptor */
7475 + flc = &ctx->flc[UPDATE];
7476 + desc = flc->sh_desc;
7477 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
7478 + ctx->ctx_len, true, priv->sec_attr.era);
7479 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
7480 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
7481 + desc_bytes(desc), DMA_BIDIRECTIONAL);
7483 + print_hex_dump(KERN_ERR,
7484 + "ahash update shdesc@" __stringify(__LINE__)": ",
7485 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
7488 + /* ahash_update_first shared descriptor */
7489 + flc = &ctx->flc[UPDATE_FIRST];
7490 + desc = flc->sh_desc;
7491 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
7492 + ctx->ctx_len, false, priv->sec_attr.era);
7493 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
7494 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
7495 + desc_bytes(desc), DMA_BIDIRECTIONAL);
7497 + print_hex_dump(KERN_ERR,
7498 + "ahash update first shdesc@" __stringify(__LINE__)": ",
7499 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
7502 + /* ahash_final shared descriptor */
7503 + flc = &ctx->flc[FINALIZE];
7504 + desc = flc->sh_desc;
7505 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
7506 + ctx->ctx_len, true, priv->sec_attr.era);
7507 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
7508 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
7509 + desc_bytes(desc), DMA_BIDIRECTIONAL);
7511 + print_hex_dump(KERN_ERR,
7512 + "ahash final shdesc@" __stringify(__LINE__)": ",
7513 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
7516 + /* ahash_digest shared descriptor */
7517 + flc = &ctx->flc[DIGEST];
7518 + desc = flc->sh_desc;
7519 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
7520 + ctx->ctx_len, false, priv->sec_attr.era);
7521 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
7522 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
7523 + desc_bytes(desc), DMA_BIDIRECTIONAL);
7525 + print_hex_dump(KERN_ERR,
7526 + "ahash digest shdesc@" __stringify(__LINE__)": ",
7527 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
7533 +/* Digest hash size if it is too large */
7534 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
7535 + u32 *keylen, u8 *key_out, u32 digestsize)
7537 + struct caam_request *req_ctx;
7539 + struct split_key_sh_result result;
7540 + dma_addr_t src_dma, dst_dma;
7541 + struct caam_flc *flc;
7542 + dma_addr_t flc_dma;
7543 + int ret = -ENOMEM;
7544 + struct dpaa2_fl_entry *in_fle, *out_fle;
7546 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
7550 + in_fle = &req_ctx->fd_flt[1];
7551 + out_fle = &req_ctx->fd_flt[0];
7553 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
7557 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
7559 + if (dma_mapping_error(ctx->dev, src_dma)) {
7560 + dev_err(ctx->dev, "unable to map key input memory\n");
7563 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
7565 + if (dma_mapping_error(ctx->dev, dst_dma)) {
7566 + dev_err(ctx->dev, "unable to map key output memory\n");
7570 + desc = flc->sh_desc;
7572 + init_sh_desc(desc, 0);
7574 + /* descriptor to perform unkeyed hash on key_in */
7575 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
7576 + OP_ALG_AS_INITFINAL);
7577 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
7578 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
7579 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
7580 + LDST_SRCDST_BYTE_CONTEXT);
7582 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
7583 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
7584 + desc_bytes(desc), DMA_TO_DEVICE);
7585 + if (dma_mapping_error(ctx->dev, flc_dma)) {
7586 + dev_err(ctx->dev, "unable to map shared descriptor\n");
7590 + dpaa2_fl_set_final(in_fle, true);
7591 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
7592 + dpaa2_fl_set_addr(in_fle, src_dma);
7593 + dpaa2_fl_set_len(in_fle, *keylen);
7594 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
7595 + dpaa2_fl_set_addr(out_fle, dst_dma);
7596 + dpaa2_fl_set_len(out_fle, digestsize);
7599 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
7600 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
7601 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
7602 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
7606 + init_completion(&result.completion);
7607 + result.dev = ctx->dev;
7609 + req_ctx->flc = flc;
7610 + req_ctx->flc_dma = flc_dma;
7611 + req_ctx->cbk = split_key_sh_done;
7612 + req_ctx->ctx = &result;
7614 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
7615 + if (ret == -EINPROGRESS) {
7617 + wait_for_completion(&result.completion);
7620 + print_hex_dump(KERN_ERR,
7621 + "digested key@" __stringify(__LINE__)": ",
7622 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
7627 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
7630 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
7632 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
7638 + *keylen = digestsize;
7643 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
7644 + unsigned int keylen)
7646 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7647 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
7648 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
7650 + u8 *hashed_key = NULL;
7653 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
7656 + if (keylen > blocksize) {
7657 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
7658 + GFP_KERNEL | GFP_DMA);
7661 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
7664 + goto bad_free_key;
7668 + ctx->adata.keylen = keylen;
7669 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
7670 + OP_ALG_ALGSEL_MASK);
7671 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
7672 + goto bad_free_key;
7674 + memcpy(ctx->key, key, keylen);
7676 + kfree(hashed_key);
7677 + return ahash_set_sh_desc(ahash);
7679 + kfree(hashed_key);
7680 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
7684 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
7685 + struct ahash_request *req, int dst_len)
7687 + struct caam_hash_state *state = ahash_request_ctx(req);
7689 + if (edesc->src_nents)
7690 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
7691 + if (edesc->dst_dma)
7692 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
7694 + if (edesc->qm_sg_bytes)
7695 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
7698 + if (state->buf_dma) {
7699 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
7701 + state->buf_dma = 0;
7705 +static inline void ahash_unmap_ctx(struct device *dev,
7706 + struct ahash_edesc *edesc,
7707 + struct ahash_request *req, int dst_len,
7710 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7711 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7712 + struct caam_hash_state *state = ahash_request_ctx(req);
7714 + if (state->ctx_dma) {
7715 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
7716 + state->ctx_dma = 0;
7718 + ahash_unmap(dev, edesc, req, dst_len);
7721 +static void ahash_done(void *cbk_ctx, u32 status)
7723 + struct crypto_async_request *areq = cbk_ctx;
7724 + struct ahash_request *req = ahash_request_cast(areq);
7725 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7726 + struct caam_hash_state *state = ahash_request_ctx(req);
7727 + struct ahash_edesc *edesc = state->caam_req.edesc;
7728 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7729 + int digestsize = crypto_ahash_digestsize(ahash);
7733 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
7736 + if (unlikely(status)) {
7737 + caam_qi2_strstatus(ctx->dev, status);
7741 + ahash_unmap(ctx->dev, edesc, req, digestsize);
7742 + qi_cache_free(edesc);
7745 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
7746 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
7749 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
7750 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
7754 + req->base.complete(&req->base, ecode);
7757 +static void ahash_done_bi(void *cbk_ctx, u32 status)
7759 + struct crypto_async_request *areq = cbk_ctx;
7760 + struct ahash_request *req = ahash_request_cast(areq);
7761 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7762 + struct caam_hash_state *state = ahash_request_ctx(req);
7763 + struct ahash_edesc *edesc = state->caam_req.edesc;
7764 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7767 + int digestsize = crypto_ahash_digestsize(ahash);
7769 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
7772 + if (unlikely(status)) {
7773 + caam_qi2_strstatus(ctx->dev, status);
7777 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
7778 + switch_buf(state);
7779 + qi_cache_free(edesc);
7782 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
7783 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
7786 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
7787 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
7791 + req->base.complete(&req->base, ecode);
7794 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
7796 + struct crypto_async_request *areq = cbk_ctx;
7797 + struct ahash_request *req = ahash_request_cast(areq);
7798 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7799 + struct caam_hash_state *state = ahash_request_ctx(req);
7800 + struct ahash_edesc *edesc = state->caam_req.edesc;
7801 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7802 + int digestsize = crypto_ahash_digestsize(ahash);
7806 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
7809 + if (unlikely(status)) {
7810 + caam_qi2_strstatus(ctx->dev, status);
7814 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
7815 + qi_cache_free(edesc);
7818 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
7819 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
7822 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
7823 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
7827 + req->base.complete(&req->base, ecode);
7830 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
7832 + struct crypto_async_request *areq = cbk_ctx;
7833 + struct ahash_request *req = ahash_request_cast(areq);
7834 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7835 + struct caam_hash_state *state = ahash_request_ctx(req);
7836 + struct ahash_edesc *edesc = state->caam_req.edesc;
7837 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7840 + int digestsize = crypto_ahash_digestsize(ahash);
7842 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
7845 + if (unlikely(status)) {
7846 + caam_qi2_strstatus(ctx->dev, status);
7850 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
7851 + switch_buf(state);
7852 + qi_cache_free(edesc);
7855 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
7856 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
7859 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
7860 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
7864 + req->base.complete(&req->base, ecode);
7867 +static int ahash_update_ctx(struct ahash_request *req)
7869 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7870 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
7871 + struct caam_hash_state *state = ahash_request_ctx(req);
7872 + struct caam_request *req_ctx = &state->caam_req;
7873 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
7874 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
7875 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
7876 + GFP_KERNEL : GFP_ATOMIC;
7877 + u8 *buf = current_buf(state);
7878 + int *buflen = current_buflen(state);
7879 + u8 *next_buf = alt_buf(state);
7880 + int *next_buflen = alt_buflen(state), last_buflen;
7881 + int in_len = *buflen + req->nbytes, to_hash;
7882 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
7883 + struct ahash_edesc *edesc;
7886 + last_buflen = *next_buflen;
7887 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
7888 + to_hash = in_len - *next_buflen;
7891 + struct dpaa2_sg_entry *sg_table;
7893 + src_nents = sg_nents_for_len(req->src,
7894 + req->nbytes - (*next_buflen));
7895 + if (src_nents < 0) {
7896 + dev_err(ctx->dev, "Invalid number of src SG.\n");
7901 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
7903 + if (!mapped_nents) {
7904 + dev_err(ctx->dev, "unable to DMA map source\n");
7911 + /* allocate space for base edesc and link tables */
7912 + edesc = qi_cache_zalloc(GFP_DMA | flags);
7914 + dma_unmap_sg(ctx->dev, req->src, src_nents,
7919 + edesc->src_nents = src_nents;
7920 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
7921 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
7922 + sizeof(*sg_table);
7923 + sg_table = &edesc->sgt[0];
7925 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
7926 + DMA_BIDIRECTIONAL);
7930 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
7934 + if (mapped_nents) {
7935 + sg_to_qm_sg_last(req->src, mapped_nents,
7936 + sg_table + qm_sg_src_index, 0);
7938 + scatterwalk_map_and_copy(next_buf, req->src,
7939 + to_hash - *buflen,
7942 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
7946 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
7947 + qm_sg_bytes, DMA_TO_DEVICE);
7948 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
7949 + dev_err(ctx->dev, "unable to map S/G table\n");
7953 + edesc->qm_sg_bytes = qm_sg_bytes;
7955 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
7956 + dpaa2_fl_set_final(in_fle, true);
7957 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
7958 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
7959 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
7960 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
7961 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
7962 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
7964 + req_ctx->flc = &ctx->flc[UPDATE];
7965 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
7966 + req_ctx->cbk = ahash_done_bi;
7967 + req_ctx->ctx = &req->base;
7968 + req_ctx->edesc = edesc;
7970 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
7971 + if (ret != -EINPROGRESS &&
7972 + !(ret == -EBUSY &&
7973 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
7975 + } else if (*next_buflen) {
7976 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
7978 + *buflen = *next_buflen;
7979 + *next_buflen = last_buflen;
7982 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
7983 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
7984 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
7985 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
7991 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
7992 + qi_cache_free(edesc);
7996 +static int ahash_final_ctx(struct ahash_request *req)
7998 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
7999 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8000 + struct caam_hash_state *state = ahash_request_ctx(req);
8001 + struct caam_request *req_ctx = &state->caam_req;
8002 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8003 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8004 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8005 + GFP_KERNEL : GFP_ATOMIC;
8006 + int buflen = *current_buflen(state);
8007 + int qm_sg_bytes, qm_sg_src_index;
8008 + int digestsize = crypto_ahash_digestsize(ahash);
8009 + struct ahash_edesc *edesc;
8010 + struct dpaa2_sg_entry *sg_table;
8013 + /* allocate space for base edesc and link tables */
8014 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8018 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
8019 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
8020 + sg_table = &edesc->sgt[0];
8022 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8027 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8031 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
8033 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
8035 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8036 + dev_err(ctx->dev, "unable to map S/G table\n");
8040 + edesc->qm_sg_bytes = qm_sg_bytes;
8042 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
8044 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
8045 + dev_err(ctx->dev, "unable to map dst\n");
8046 + edesc->dst_dma = 0;
8051 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8052 + dpaa2_fl_set_final(in_fle, true);
8053 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8054 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8055 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
8056 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8057 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
8058 + dpaa2_fl_set_len(out_fle, digestsize);
8060 + req_ctx->flc = &ctx->flc[FINALIZE];
8061 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
8062 + req_ctx->cbk = ahash_done_ctx_src;
8063 + req_ctx->ctx = &req->base;
8064 + req_ctx->edesc = edesc;
8066 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8067 + if (ret == -EINPROGRESS ||
8068 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8072 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
8073 + qi_cache_free(edesc);
8077 +static int ahash_finup_ctx(struct ahash_request *req)
8079 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8080 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8081 + struct caam_hash_state *state = ahash_request_ctx(req);
8082 + struct caam_request *req_ctx = &state->caam_req;
8083 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8084 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8085 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8086 + GFP_KERNEL : GFP_ATOMIC;
8087 + int buflen = *current_buflen(state);
8088 + int qm_sg_bytes, qm_sg_src_index;
8089 + int src_nents, mapped_nents;
8090 + int digestsize = crypto_ahash_digestsize(ahash);
8091 + struct ahash_edesc *edesc;
8092 + struct dpaa2_sg_entry *sg_table;
8095 + src_nents = sg_nents_for_len(req->src, req->nbytes);
8096 + if (src_nents < 0) {
8097 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8102 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8104 + if (!mapped_nents) {
8105 + dev_err(ctx->dev, "unable to DMA map source\n");
8112 + /* allocate space for base edesc and link tables */
8113 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8115 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
8119 + edesc->src_nents = src_nents;
8120 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
8121 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
8122 + sg_table = &edesc->sgt[0];
8124 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8129 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8133 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
8135 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
8137 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8138 + dev_err(ctx->dev, "unable to map S/G table\n");
8142 + edesc->qm_sg_bytes = qm_sg_bytes;
8144 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
8146 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
8147 + dev_err(ctx->dev, "unable to map dst\n");
8148 + edesc->dst_dma = 0;
8153 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8154 + dpaa2_fl_set_final(in_fle, true);
8155 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8156 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8157 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
8158 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8159 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
8160 + dpaa2_fl_set_len(out_fle, digestsize);
8162 + req_ctx->flc = &ctx->flc[FINALIZE];
8163 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
8164 + req_ctx->cbk = ahash_done_ctx_src;
8165 + req_ctx->ctx = &req->base;
8166 + req_ctx->edesc = edesc;
8168 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8169 + if (ret == -EINPROGRESS ||
8170 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8174 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
8175 + qi_cache_free(edesc);
8179 +static int ahash_digest(struct ahash_request *req)
8181 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8182 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8183 + struct caam_hash_state *state = ahash_request_ctx(req);
8184 + struct caam_request *req_ctx = &state->caam_req;
8185 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8186 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8187 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8188 + GFP_KERNEL : GFP_ATOMIC;
8189 + int digestsize = crypto_ahash_digestsize(ahash);
8190 + int src_nents, mapped_nents;
8191 + struct ahash_edesc *edesc;
8192 + int ret = -ENOMEM;
8194 + state->buf_dma = 0;
8196 + src_nents = sg_nents_for_len(req->src, req->nbytes);
8197 + if (src_nents < 0) {
8198 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8203 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8205 + if (!mapped_nents) {
8206 + dev_err(ctx->dev, "unable to map source for DMA\n");
8213 + /* allocate space for base edesc and link tables */
8214 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8216 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
8220 + edesc->src_nents = src_nents;
8221 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8223 + if (mapped_nents > 1) {
8225 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
8227 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
8228 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
8229 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8230 + qm_sg_bytes, DMA_TO_DEVICE);
8231 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8232 + dev_err(ctx->dev, "unable to map S/G table\n");
8235 + edesc->qm_sg_bytes = qm_sg_bytes;
8236 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8237 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8239 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8240 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
8243 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
8245 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
8246 + dev_err(ctx->dev, "unable to map dst\n");
8247 + edesc->dst_dma = 0;
8251 + dpaa2_fl_set_final(in_fle, true);
8252 + dpaa2_fl_set_len(in_fle, req->nbytes);
8253 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8254 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
8255 + dpaa2_fl_set_len(out_fle, digestsize);
8257 + req_ctx->flc = &ctx->flc[DIGEST];
8258 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
8259 + req_ctx->cbk = ahash_done;
8260 + req_ctx->ctx = &req->base;
8261 + req_ctx->edesc = edesc;
8262 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8263 + if (ret == -EINPROGRESS ||
8264 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8268 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8269 + qi_cache_free(edesc);
8273 +static int ahash_final_no_ctx(struct ahash_request *req)
8275 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8276 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8277 + struct caam_hash_state *state = ahash_request_ctx(req);
8278 + struct caam_request *req_ctx = &state->caam_req;
8279 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8280 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8281 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8282 + GFP_KERNEL : GFP_ATOMIC;
8283 + u8 *buf = current_buf(state);
8284 + int buflen = *current_buflen(state);
8285 + int digestsize = crypto_ahash_digestsize(ahash);
8286 + struct ahash_edesc *edesc;
8287 + int ret = -ENOMEM;
8289 + /* allocate space for base edesc and link tables */
8290 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8294 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
8295 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
8296 + dev_err(ctx->dev, "unable to map src\n");
8300 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
8302 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
8303 + dev_err(ctx->dev, "unable to map dst\n");
8304 + edesc->dst_dma = 0;
8308 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8309 + dpaa2_fl_set_final(in_fle, true);
8310 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8311 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
8312 + dpaa2_fl_set_len(in_fle, buflen);
8313 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8314 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
8315 + dpaa2_fl_set_len(out_fle, digestsize);
8317 + req_ctx->flc = &ctx->flc[DIGEST];
8318 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
8319 + req_ctx->cbk = ahash_done;
8320 + req_ctx->ctx = &req->base;
8321 + req_ctx->edesc = edesc;
8323 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8324 + if (ret == -EINPROGRESS ||
8325 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8329 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8330 + qi_cache_free(edesc);
8334 +static int ahash_update_no_ctx(struct ahash_request *req)
8336 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8337 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8338 + struct caam_hash_state *state = ahash_request_ctx(req);
8339 + struct caam_request *req_ctx = &state->caam_req;
8340 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8341 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8342 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8343 + GFP_KERNEL : GFP_ATOMIC;
8344 + u8 *buf = current_buf(state);
8345 + int *buflen = current_buflen(state);
8346 + u8 *next_buf = alt_buf(state);
8347 + int *next_buflen = alt_buflen(state);
8348 + int in_len = *buflen + req->nbytes, to_hash;
8349 + int qm_sg_bytes, src_nents, mapped_nents;
8350 + struct ahash_edesc *edesc;
8353 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
8354 + to_hash = in_len - *next_buflen;
8357 + struct dpaa2_sg_entry *sg_table;
8359 + src_nents = sg_nents_for_len(req->src,
8360 + req->nbytes - *next_buflen);
8361 + if (src_nents < 0) {
8362 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8367 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8369 + if (!mapped_nents) {
8370 + dev_err(ctx->dev, "unable to DMA map source\n");
8377 + /* allocate space for base edesc and link tables */
8378 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8380 + dma_unmap_sg(ctx->dev, req->src, src_nents,
8385 + edesc->src_nents = src_nents;
8386 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
8387 + sg_table = &edesc->sgt[0];
8389 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
8393 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
8396 + scatterwalk_map_and_copy(next_buf, req->src,
8397 + to_hash - *buflen,
8400 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8401 + qm_sg_bytes, DMA_TO_DEVICE);
8402 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8403 + dev_err(ctx->dev, "unable to map S/G table\n");
8407 + edesc->qm_sg_bytes = qm_sg_bytes;
8409 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
8410 + ctx->ctx_len, DMA_FROM_DEVICE);
8411 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
8412 + dev_err(ctx->dev, "unable to map ctx\n");
8413 + state->ctx_dma = 0;
8418 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8419 + dpaa2_fl_set_final(in_fle, true);
8420 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8421 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8422 + dpaa2_fl_set_len(in_fle, to_hash);
8423 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8424 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8425 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8427 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
8428 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
8429 + req_ctx->cbk = ahash_done_ctx_dst;
8430 + req_ctx->ctx = &req->base;
8431 + req_ctx->edesc = edesc;
8433 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8434 + if (ret != -EINPROGRESS &&
8435 + !(ret == -EBUSY &&
8436 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8439 + state->update = ahash_update_ctx;
8440 + state->finup = ahash_finup_ctx;
8441 + state->final = ahash_final_ctx;
8442 + } else if (*next_buflen) {
8443 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
8445 + *buflen = *next_buflen;
8449 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
8450 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
8451 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8452 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
8458 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
8459 + qi_cache_free(edesc);
8463 +static int ahash_finup_no_ctx(struct ahash_request *req)
8465 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8466 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8467 + struct caam_hash_state *state = ahash_request_ctx(req);
8468 + struct caam_request *req_ctx = &state->caam_req;
8469 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8470 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8471 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8472 + GFP_KERNEL : GFP_ATOMIC;
8473 + int buflen = *current_buflen(state);
8474 + int qm_sg_bytes, src_nents, mapped_nents;
8475 + int digestsize = crypto_ahash_digestsize(ahash);
8476 + struct ahash_edesc *edesc;
8477 + struct dpaa2_sg_entry *sg_table;
8480 + src_nents = sg_nents_for_len(req->src, req->nbytes);
8481 + if (src_nents < 0) {
8482 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8487 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8489 + if (!mapped_nents) {
8490 + dev_err(ctx->dev, "unable to DMA map source\n");
8497 + /* allocate space for base edesc and link tables */
8498 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8500 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
8504 + edesc->src_nents = src_nents;
8505 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
8506 + sg_table = &edesc->sgt[0];
8508 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
8512 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
8514 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
8516 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8517 + dev_err(ctx->dev, "unable to map S/G table\n");
8521 + edesc->qm_sg_bytes = qm_sg_bytes;
8523 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
8525 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
8526 + dev_err(ctx->dev, "unable to map dst\n");
8527 + edesc->dst_dma = 0;
8532 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8533 + dpaa2_fl_set_final(in_fle, true);
8534 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8535 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8536 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
8537 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8538 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
8539 + dpaa2_fl_set_len(out_fle, digestsize);
8541 + req_ctx->flc = &ctx->flc[DIGEST];
8542 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
8543 + req_ctx->cbk = ahash_done;
8544 + req_ctx->ctx = &req->base;
8545 + req_ctx->edesc = edesc;
8546 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8547 + if (ret != -EINPROGRESS &&
8548 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8553 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8554 + qi_cache_free(edesc);
8558 +static int ahash_update_first(struct ahash_request *req)
8560 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8561 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8562 + struct caam_hash_state *state = ahash_request_ctx(req);
8563 + struct caam_request *req_ctx = &state->caam_req;
8564 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8565 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8566 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8567 + GFP_KERNEL : GFP_ATOMIC;
8568 + u8 *next_buf = alt_buf(state);
8569 + int *next_buflen = alt_buflen(state);
8571 + int src_nents, mapped_nents;
8572 + struct ahash_edesc *edesc;
8575 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
8577 + to_hash = req->nbytes - *next_buflen;
8580 + struct dpaa2_sg_entry *sg_table;
8582 + src_nents = sg_nents_for_len(req->src,
8583 + req->nbytes - (*next_buflen));
8584 + if (src_nents < 0) {
8585 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8590 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8592 + if (!mapped_nents) {
8593 + dev_err(ctx->dev, "unable to map source for DMA\n");
8600 + /* allocate space for base edesc and link tables */
8601 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8603 + dma_unmap_sg(ctx->dev, req->src, src_nents,
8608 + edesc->src_nents = src_nents;
8609 + sg_table = &edesc->sgt[0];
8611 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8612 + dpaa2_fl_set_final(in_fle, true);
8613 + dpaa2_fl_set_len(in_fle, to_hash);
8615 + if (mapped_nents > 1) {
8618 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
8619 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
8620 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8623 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8624 + dev_err(ctx->dev, "unable to map S/G table\n");
8628 + edesc->qm_sg_bytes = qm_sg_bytes;
8629 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8630 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8632 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8633 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
8637 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
8640 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
8641 + ctx->ctx_len, DMA_FROM_DEVICE);
8642 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
8643 + dev_err(ctx->dev, "unable to map ctx\n");
8644 + state->ctx_dma = 0;
8649 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8650 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8651 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8653 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
8654 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
8655 + req_ctx->cbk = ahash_done_ctx_dst;
8656 + req_ctx->ctx = &req->base;
8657 + req_ctx->edesc = edesc;
8659 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8660 + if (ret != -EINPROGRESS &&
8661 + !(ret == -EBUSY && req->base.flags &
8662 + CRYPTO_TFM_REQ_MAY_BACKLOG))
8665 + state->update = ahash_update_ctx;
8666 + state->finup = ahash_finup_ctx;
8667 + state->final = ahash_final_ctx;
8668 + } else if (*next_buflen) {
8669 + state->update = ahash_update_no_ctx;
8670 + state->finup = ahash_finup_no_ctx;
8671 + state->final = ahash_final_no_ctx;
8672 + scatterwalk_map_and_copy(next_buf, req->src, 0,
8674 + switch_buf(state);
8677 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8678 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
8683 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
8684 + qi_cache_free(edesc);
8688 +static int ahash_finup_first(struct ahash_request *req)
8690 + return ahash_digest(req);
8693 +static int ahash_init(struct ahash_request *req)
8695 + struct caam_hash_state *state = ahash_request_ctx(req);
8697 + state->update = ahash_update_first;
8698 + state->finup = ahash_finup_first;
8699 + state->final = ahash_final_no_ctx;
8701 + state->ctx_dma = 0;
8702 + state->current_buf = 0;
8703 + state->buf_dma = 0;
8704 + state->buflen_0 = 0;
8705 + state->buflen_1 = 0;
8710 +static int ahash_update(struct ahash_request *req)
8712 + struct caam_hash_state *state = ahash_request_ctx(req);
8714 + return state->update(req);
8717 +static int ahash_finup(struct ahash_request *req)
8719 + struct caam_hash_state *state = ahash_request_ctx(req);
8721 + return state->finup(req);
8724 +static int ahash_final(struct ahash_request *req)
8726 + struct caam_hash_state *state = ahash_request_ctx(req);
8728 + return state->final(req);
8731 +static int ahash_export(struct ahash_request *req, void *out)
8733 + struct caam_hash_state *state = ahash_request_ctx(req);
8734 + struct caam_export_state *export = out;
8738 + if (state->current_buf) {
8739 + buf = state->buf_1;
8740 + len = state->buflen_1;
8742 + buf = state->buf_0;
8743 + len = state->buflen_0;
8746 + memcpy(export->buf, buf, len);
8747 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
8748 + export->buflen = len;
8749 + export->update = state->update;
8750 + export->final = state->final;
8751 + export->finup = state->finup;
8756 +static int ahash_import(struct ahash_request *req, const void *in)
8758 + struct caam_hash_state *state = ahash_request_ctx(req);
8759 + const struct caam_export_state *export = in;
8761 + memset(state, 0, sizeof(*state));
8762 + memcpy(state->buf_0, export->buf, export->buflen);
8763 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
8764 + state->buflen_0 = export->buflen;
8765 + state->update = export->update;
8766 + state->final = export->final;
8767 + state->finup = export->finup;
8772 +struct caam_hash_template {
8773 + char name[CRYPTO_MAX_ALG_NAME];
8774 + char driver_name[CRYPTO_MAX_ALG_NAME];
8775 + char hmac_name[CRYPTO_MAX_ALG_NAME];
8776 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
8777 + unsigned int blocksize;
8778 + struct ahash_alg template_ahash;
8782 +/* ahash descriptors */
8783 +static struct caam_hash_template driver_hash[] = {
8786 + .driver_name = "sha1-caam-qi2",
8787 + .hmac_name = "hmac(sha1)",
8788 + .hmac_driver_name = "hmac-sha1-caam-qi2",
8789 + .blocksize = SHA1_BLOCK_SIZE,
8790 + .template_ahash = {
8791 + .init = ahash_init,
8792 + .update = ahash_update,
8793 + .final = ahash_final,
8794 + .finup = ahash_finup,
8795 + .digest = ahash_digest,
8796 + .export = ahash_export,
8797 + .import = ahash_import,
8798 + .setkey = ahash_setkey,
8800 + .digestsize = SHA1_DIGEST_SIZE,
8801 + .statesize = sizeof(struct caam_export_state),
8804 + .alg_type = OP_ALG_ALGSEL_SHA1,
8807 + .driver_name = "sha224-caam-qi2",
8808 + .hmac_name = "hmac(sha224)",
8809 + .hmac_driver_name = "hmac-sha224-caam-qi2",
8810 + .blocksize = SHA224_BLOCK_SIZE,
8811 + .template_ahash = {
8812 + .init = ahash_init,
8813 + .update = ahash_update,
8814 + .final = ahash_final,
8815 + .finup = ahash_finup,
8816 + .digest = ahash_digest,
8817 + .export = ahash_export,
8818 + .import = ahash_import,
8819 + .setkey = ahash_setkey,
8821 + .digestsize = SHA224_DIGEST_SIZE,
8822 + .statesize = sizeof(struct caam_export_state),
8825 + .alg_type = OP_ALG_ALGSEL_SHA224,
8828 + .driver_name = "sha256-caam-qi2",
8829 + .hmac_name = "hmac(sha256)",
8830 + .hmac_driver_name = "hmac-sha256-caam-qi2",
8831 + .blocksize = SHA256_BLOCK_SIZE,
8832 + .template_ahash = {
8833 + .init = ahash_init,
8834 + .update = ahash_update,
8835 + .final = ahash_final,
8836 + .finup = ahash_finup,
8837 + .digest = ahash_digest,
8838 + .export = ahash_export,
8839 + .import = ahash_import,
8840 + .setkey = ahash_setkey,
8842 + .digestsize = SHA256_DIGEST_SIZE,
8843 + .statesize = sizeof(struct caam_export_state),
8846 + .alg_type = OP_ALG_ALGSEL_SHA256,
8849 + .driver_name = "sha384-caam-qi2",
8850 + .hmac_name = "hmac(sha384)",
8851 + .hmac_driver_name = "hmac-sha384-caam-qi2",
8852 + .blocksize = SHA384_BLOCK_SIZE,
8853 + .template_ahash = {
8854 + .init = ahash_init,
8855 + .update = ahash_update,
8856 + .final = ahash_final,
8857 + .finup = ahash_finup,
8858 + .digest = ahash_digest,
8859 + .export = ahash_export,
8860 + .import = ahash_import,
8861 + .setkey = ahash_setkey,
8863 + .digestsize = SHA384_DIGEST_SIZE,
8864 + .statesize = sizeof(struct caam_export_state),
8867 + .alg_type = OP_ALG_ALGSEL_SHA384,
8870 + .driver_name = "sha512-caam-qi2",
8871 + .hmac_name = "hmac(sha512)",
8872 + .hmac_driver_name = "hmac-sha512-caam-qi2",
8873 + .blocksize = SHA512_BLOCK_SIZE,
8874 + .template_ahash = {
8875 + .init = ahash_init,
8876 + .update = ahash_update,
8877 + .final = ahash_final,
8878 + .finup = ahash_finup,
8879 + .digest = ahash_digest,
8880 + .export = ahash_export,
8881 + .import = ahash_import,
8882 + .setkey = ahash_setkey,
8884 + .digestsize = SHA512_DIGEST_SIZE,
8885 + .statesize = sizeof(struct caam_export_state),
8888 + .alg_type = OP_ALG_ALGSEL_SHA512,
8891 + .driver_name = "md5-caam-qi2",
8892 + .hmac_name = "hmac(md5)",
8893 + .hmac_driver_name = "hmac-md5-caam-qi2",
8894 + .blocksize = MD5_BLOCK_WORDS * 4,
8895 + .template_ahash = {
8896 + .init = ahash_init,
8897 + .update = ahash_update,
8898 + .final = ahash_final,
8899 + .finup = ahash_finup,
8900 + .digest = ahash_digest,
8901 + .export = ahash_export,
8902 + .import = ahash_import,
8903 + .setkey = ahash_setkey,
8905 + .digestsize = MD5_DIGEST_SIZE,
8906 + .statesize = sizeof(struct caam_export_state),
8909 + .alg_type = OP_ALG_ALGSEL_MD5,
8913 +struct caam_hash_alg {
8914 + struct list_head entry;
8915 + struct device *dev;
8917 + struct ahash_alg ahash_alg;
8920 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
8922 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
8923 + struct crypto_alg *base = tfm->__crt_alg;
8924 + struct hash_alg_common *halg =
8925 + container_of(base, struct hash_alg_common, base);
8926 + struct ahash_alg *alg =
8927 + container_of(halg, struct ahash_alg, halg);
8928 + struct caam_hash_alg *caam_hash =
8929 + container_of(alg, struct caam_hash_alg, ahash_alg);
8930 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
8931 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
8932 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
8933 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
8934 + HASH_MSG_LEN + 32,
8935 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
8936 + HASH_MSG_LEN + 64,
8937 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
8938 + dma_addr_t dma_addr;
8941 + ctx->dev = caam_hash->dev;
8943 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
8944 + DMA_BIDIRECTIONAL,
8945 + DMA_ATTR_SKIP_CPU_SYNC);
8946 + if (dma_mapping_error(ctx->dev, dma_addr)) {
8947 + dev_err(ctx->dev, "unable to map shared descriptors\n");
8951 + for (i = 0; i < HASH_NUM_OP; i++)
8952 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
8954 + /* copy descriptor header template value */
8955 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
8957 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
8958 + OP_ALG_ALGSEL_SUBMASK) >>
8959 + OP_ALG_ALGSEL_SHIFT];
8961 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
8962 + sizeof(struct caam_hash_state));
8964 + return ahash_set_sh_desc(ahash);
8967 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
8969 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
8971 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
8972 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
8975 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
8976 + struct caam_hash_template *template, bool keyed)
8978 + struct caam_hash_alg *t_alg;
8979 + struct ahash_alg *halg;
8980 + struct crypto_alg *alg;
8982 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
8984 + return ERR_PTR(-ENOMEM);
8986 + t_alg->ahash_alg = template->template_ahash;
8987 + halg = &t_alg->ahash_alg;
8988 + alg = &halg->halg.base;
8991 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
8992 + template->hmac_name);
8993 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
8994 + template->hmac_driver_name);
8996 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
8998 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
8999 + template->driver_name);
9000 + t_alg->ahash_alg.setkey = NULL;
9002 + alg->cra_module = THIS_MODULE;
9003 + alg->cra_init = caam_hash_cra_init;
9004 + alg->cra_exit = caam_hash_cra_exit;
9005 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
9006 + alg->cra_priority = CAAM_CRA_PRIORITY;
9007 + alg->cra_blocksize = template->blocksize;
9008 + alg->cra_alignmask = 0;
9009 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
9010 + alg->cra_type = &crypto_ahash_type;
9012 + t_alg->alg_type = template->alg_type;
9018 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
9020 + struct dpaa2_caam_priv_per_cpu *ppriv;
9022 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
9023 + napi_schedule_irqoff(&ppriv->napi);
9026 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
9028 + struct device *dev = priv->dev;
9029 + struct dpaa2_io_notification_ctx *nctx;
9030 + struct dpaa2_caam_priv_per_cpu *ppriv;
9031 + int err, i = 0, cpu;
9033 + for_each_online_cpu(cpu) {
9034 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9035 + ppriv->priv = priv;
9036 + nctx = &ppriv->nctx;
9037 + nctx->is_cdan = 0;
9038 + nctx->id = ppriv->rsp_fqid;
9039 + nctx->desired_cpu = cpu;
9040 + nctx->cb = dpaa2_caam_fqdan_cb;
9042 + /* Register notification callbacks */
9043 + err = dpaa2_io_service_register(NULL, nctx);
9044 + if (unlikely(err)) {
9045 + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
9048 + * If no affine DPIO for this core, there's probably
9049 + * none available for next cores either. Signal we want
9050 + * to retry later, in case the DPIO devices weren't
9053 + err = -EPROBE_DEFER;
9057 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
9059 + if (unlikely(!ppriv->store)) {
9060 + dev_err(dev, "dpaa2_io_store_create() failed\n");
9064 + if (++i == priv->num_pairs)
9071 + for_each_online_cpu(cpu) {
9072 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9073 + if (!ppriv->nctx.cb)
9075 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
9078 + for_each_online_cpu(cpu) {
9079 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9080 + if (!ppriv->store)
9082 + dpaa2_io_store_destroy(ppriv->store);
9088 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
9090 + struct dpaa2_caam_priv_per_cpu *ppriv;
9093 + for_each_online_cpu(cpu) {
9094 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9095 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
9096 + dpaa2_io_store_destroy(ppriv->store);
9098 + if (++i == priv->num_pairs)
9103 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
9105 + struct dpseci_rx_queue_cfg rx_queue_cfg;
9106 + struct device *dev = priv->dev;
9107 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
9108 + struct dpaa2_caam_priv_per_cpu *ppriv;
9109 + int err = 0, i = 0, cpu;
9111 + /* Configure Rx queues */
9112 + for_each_online_cpu(cpu) {
9113 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9115 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
9116 + DPSECI_QUEUE_OPT_USER_CTX;
9117 + rx_queue_cfg.order_preservation_en = 0;
9118 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
9119 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
9121 + * Rx priority (WQ) doesn't really matter, since we use
9122 + * pull mode, i.e. volatile dequeues from specific FQs
9124 + rx_queue_cfg.dest_cfg.priority = 0;
9125 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
9127 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
9130 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
9135 + if (++i == priv->num_pairs)
9142 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
9144 + struct device *dev = priv->dev;
9146 + if (!priv->cscn_mem)
9149 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
9150 + kfree(priv->cscn_mem);
9153 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
9155 + struct device *dev = priv->dev;
9156 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
9158 + dpaa2_dpseci_congestion_free(priv);
9159 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
9162 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
9163 + const struct dpaa2_fd *fd)
9165 + struct caam_request *req;
9168 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
9169 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
9173 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
9174 + if (unlikely(fd_err))
9175 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
9178 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
9179 + * in FD[ERR] or FD[FRC].
9181 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
9182 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
9183 + DMA_BIDIRECTIONAL);
9184 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
9187 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
9191 + /* Retry while portal is busy */
9193 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
9195 + } while (err == -EBUSY);
9197 + if (unlikely(err))
9198 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
9203 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
9205 + struct dpaa2_dq *dq;
9206 + int cleaned = 0, is_last;
9209 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
9210 + if (unlikely(!dq)) {
9211 + if (unlikely(!is_last)) {
9212 + dev_dbg(ppriv->priv->dev,
9213 + "FQ %d returned no valid frames\n",
9216 + * MUST retry until we get some sort of
9217 + * valid response token (be it "empty dequeue"
9218 + * or a valid frame).
9226 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
9228 + } while (!is_last);
9233 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
9235 + struct dpaa2_caam_priv_per_cpu *ppriv;
9236 + struct dpaa2_caam_priv *priv;
9237 + int err, cleaned = 0, store_cleaned;
9239 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
9240 + priv = ppriv->priv;
9242 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
9246 + store_cleaned = dpaa2_caam_store_consume(ppriv);
9247 + cleaned += store_cleaned;
9249 + if (store_cleaned == 0 ||
9250 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
9253 + /* Try to dequeue some more */
9254 + err = dpaa2_caam_pull_fq(ppriv);
9255 + if (unlikely(err))
9259 + if (cleaned < budget) {
9260 + napi_complete_done(napi, cleaned);
9261 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
9262 + if (unlikely(err))
9263 + dev_err(priv->dev, "Notification rearm failed: %d\n",
9270 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
9273 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
9274 + struct device *dev = priv->dev;
9278 + * Congestion group feature supported starting with DPSECI API v5.1
9279 + * and only when object has been created with this capability.
9281 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
9282 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
9285 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
9286 + GFP_KERNEL | GFP_DMA);
9287 + if (!priv->cscn_mem)
9290 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
9291 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
9292 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
9293 + if (dma_mapping_error(dev, priv->cscn_dma)) {
9294 + dev_err(dev, "Error mapping CSCN memory area\n");
9299 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
9300 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
9301 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
9302 + cong_notif_cfg.message_ctx = (u64)priv;
9303 + cong_notif_cfg.message_iova = priv->cscn_dma;
9304 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
9305 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
9306 + DPSECI_CGN_MODE_COHERENT_WRITE;
9308 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
9311 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
9312 + goto err_set_cong;
9318 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
9320 + kfree(priv->cscn_mem);
9325 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
9327 + struct device *dev = &ls_dev->dev;
9328 + struct dpaa2_caam_priv *priv;
9329 + struct dpaa2_caam_priv_per_cpu *ppriv;
9333 + priv = dev_get_drvdata(dev);
9336 + priv->dpsec_id = ls_dev->obj_desc.id;
9338 + /* Get a handle for the DPSECI this interface is associate with */
9339 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
9341 + dev_err(dev, "dpsec_open() failed: %d\n", err);
9345 + dev_info(dev, "Opened dpseci object successfully\n");
9347 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
9348 + &priv->minor_ver);
9350 + dev_err(dev, "dpseci_get_api_version() failed\n");
9351 + goto err_get_vers;
9354 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
9355 + &priv->dpseci_attr);
9357 + dev_err(dev, "dpseci_get_attributes() failed\n");
9358 + goto err_get_vers;
9361 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
9364 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
9365 + goto err_get_vers;
9368 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
9370 + dev_err(dev, "setup_congestion() failed\n");
9371 + goto err_get_vers;
9374 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
9375 + priv->dpseci_attr.num_tx_queues);
9376 + if (priv->num_pairs > num_online_cpus()) {
9377 + dev_warn(dev, "%d queues won't be used\n",
9378 + priv->num_pairs - num_online_cpus());
9379 + priv->num_pairs = num_online_cpus();
9382 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
9383 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
9384 + &priv->rx_queue_attr[i]);
9386 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
9387 + goto err_get_rx_queue;
9391 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
9392 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
9393 + &priv->tx_queue_attr[i]);
9395 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
9396 + goto err_get_rx_queue;
9401 + for_each_online_cpu(cpu) {
9402 + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
9403 + priv->rx_queue_attr[i].fqid,
9404 + priv->tx_queue_attr[i].fqid);
9406 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
9407 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
9408 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
9411 + ppriv->net_dev.dev = *dev;
9412 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
9413 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
9414 + DPAA2_CAAM_NAPI_WEIGHT);
9415 + if (++i == priv->num_pairs)
9422 + dpaa2_dpseci_congestion_free(priv);
9424 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
9429 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
9431 + struct device *dev = priv->dev;
9432 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
9433 + struct dpaa2_caam_priv_per_cpu *ppriv;
9436 + for (i = 0; i < priv->num_pairs; i++) {
9437 + ppriv = per_cpu_ptr(priv->ppriv, i);
9438 + napi_enable(&ppriv->napi);
9441 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
9443 + dev_err(dev, "dpseci_enable() failed\n");
9447 + dev_info(dev, "DPSECI version %d.%d\n",
9454 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
9456 + struct device *dev = priv->dev;
9457 + struct dpaa2_caam_priv_per_cpu *ppriv;
9458 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
9459 + int i, err = 0, enabled;
9461 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
9463 + dev_err(dev, "dpseci_disable() failed\n");
9467 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
9469 + dev_err(dev, "dpseci_is_enabled() failed\n");
9473 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
9475 + for (i = 0; i < priv->num_pairs; i++) {
9476 + ppriv = per_cpu_ptr(priv->ppriv, i);
9477 + napi_disable(&ppriv->napi);
9478 + netif_napi_del(&ppriv->napi);
9484 +static struct list_head hash_list;
9486 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
9488 + struct device *dev;
9489 + struct dpaa2_caam_priv *priv;
9491 + bool registered = false;
9494 + * There is no way to get CAAM endianness - there is no direct register
9495 + * space access and MC f/w does not provide this attribute.
9496 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
9499 + caam_little_end = true;
9503 + dev = &dpseci_dev->dev;
9505 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
9509 + dev_set_drvdata(dev, priv);
9511 + priv->domain = iommu_get_domain_for_dev(dev);
9513 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
9514 + 0, SLAB_CACHE_DMA, NULL);
9516 + dev_err(dev, "Can't allocate SEC cache\n");
9521 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
9523 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
9524 + goto err_dma_mask;
9527 + /* Obtain a MC portal */
9528 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
9530 + if (err == -ENXIO)
9531 + err = -EPROBE_DEFER;
9533 + dev_err(dev, "MC portal allocation failed\n");
9535 + goto err_dma_mask;
9538 + priv->ppriv = alloc_percpu(*priv->ppriv);
9539 + if (!priv->ppriv) {
9540 + dev_err(dev, "alloc_percpu() failed\n");
9541 + goto err_alloc_ppriv;
9544 + /* DPSECI initialization */
9545 + err = dpaa2_dpseci_setup(dpseci_dev);
9547 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
9548 + goto err_dpseci_setup;
9552 + err = dpaa2_dpseci_dpio_setup(priv);
9554 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
9555 + goto err_dpio_setup;
9558 + /* DPSECI binding to DPIO */
9559 + err = dpaa2_dpseci_bind(priv);
9561 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
9565 + /* DPSECI enable */
9566 + err = dpaa2_dpseci_enable(priv);
9568 + dev_err(dev, "dpaa2_dpseci_enable() failed");
9572 + /* register crypto algorithms the device supports */
9573 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
9574 + struct caam_skcipher_alg *t_alg = driver_algs + i;
9575 + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
9577 + /* Skip DES algorithms if not supported by device */
9578 + if (!priv->sec_attr.des_acc_num &&
9579 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
9580 + (alg_sel == OP_ALG_ALGSEL_DES)))
9583 + /* Skip AES algorithms if not supported by device */
9584 + if (!priv->sec_attr.aes_acc_num &&
9585 + (alg_sel == OP_ALG_ALGSEL_AES))
9588 + t_alg->caam.dev = dev;
9589 + caam_skcipher_alg_init(t_alg);
9591 + err = crypto_register_skcipher(&t_alg->skcipher);
9593 + dev_warn(dev, "%s alg registration failed: %d\n",
9594 + t_alg->skcipher.base.cra_driver_name, err);
9598 + t_alg->registered = true;
9599 + registered = true;
9602 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
9603 + struct caam_aead_alg *t_alg = driver_aeads + i;
9604 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
9605 + OP_ALG_ALGSEL_MASK;
9606 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
9607 + OP_ALG_ALGSEL_MASK;
9609 + /* Skip DES algorithms if not supported by device */
9610 + if (!priv->sec_attr.des_acc_num &&
9611 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
9612 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
9615 + /* Skip AES algorithms if not supported by device */
9616 + if (!priv->sec_attr.aes_acc_num &&
9617 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
9621 + * Skip algorithms requiring message digests
9622 + * if MD not supported by device.
9624 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
9627 + t_alg->caam.dev = dev;
9628 + caam_aead_alg_init(t_alg);
9630 + err = crypto_register_aead(&t_alg->aead);
9632 + dev_warn(dev, "%s alg registration failed: %d\n",
9633 + t_alg->aead.base.cra_driver_name, err);
9637 + t_alg->registered = true;
9638 + registered = true;
9641 + dev_info(dev, "algorithms registered in /proc/crypto\n");
9643 + /* register hash algorithms the device supports */
9644 + INIT_LIST_HEAD(&hash_list);
9647 + * Skip registration of any hashing algorithms if MD block
9650 + if (!priv->sec_attr.md_acc_num)
9653 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
9654 + struct caam_hash_alg *t_alg;
9655 + struct caam_hash_template *alg = driver_hash + i;
9657 + /* register hmac version */
9658 + t_alg = caam_hash_alloc(dev, alg, true);
9659 + if (IS_ERR(t_alg)) {
9660 + err = PTR_ERR(t_alg);
9661 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
9662 + alg->driver_name, err);
9666 + err = crypto_register_ahash(&t_alg->ahash_alg);
9668 + dev_warn(dev, "%s alg registration failed: %d\n",
9669 + t_alg->ahash_alg.halg.base.cra_driver_name,
9673 + list_add_tail(&t_alg->entry, &hash_list);
9676 + /* register unkeyed version */
9677 + t_alg = caam_hash_alloc(dev, alg, false);
9678 + if (IS_ERR(t_alg)) {
9679 + err = PTR_ERR(t_alg);
9680 + dev_warn(dev, "%s alg allocation failed: %d\n",
9681 + alg->driver_name, err);
9685 + err = crypto_register_ahash(&t_alg->ahash_alg);
9687 + dev_warn(dev, "%s alg registration failed: %d\n",
9688 + t_alg->ahash_alg.halg.base.cra_driver_name,
9692 + list_add_tail(&t_alg->entry, &hash_list);
9695 + if (!list_empty(&hash_list))
9696 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
9701 + dpaa2_dpseci_dpio_free(priv);
9703 + dpaa2_dpseci_free(priv);
9705 + free_percpu(priv->ppriv);
9707 + fsl_mc_portal_free(priv->mc_io);
9709 + kmem_cache_destroy(qi_cache);
9711 + dev_set_drvdata(dev, NULL);
9716 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
9718 + struct device *dev;
9719 + struct dpaa2_caam_priv *priv;
9722 + dev = &ls_dev->dev;
9723 + priv = dev_get_drvdata(dev);
9725 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
9726 + struct caam_aead_alg *t_alg = driver_aeads + i;
9728 + if (t_alg->registered)
9729 + crypto_unregister_aead(&t_alg->aead);
9732 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
9733 + struct caam_skcipher_alg *t_alg = driver_algs + i;
9735 + if (t_alg->registered)
9736 + crypto_unregister_skcipher(&t_alg->skcipher);
9739 + if (hash_list.next) {
9740 + struct caam_hash_alg *t_hash_alg, *p;
9742 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
9743 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
9744 + list_del(&t_hash_alg->entry);
9745 + kfree(t_hash_alg);
9749 + dpaa2_dpseci_disable(priv);
9750 + dpaa2_dpseci_dpio_free(priv);
9751 + dpaa2_dpseci_free(priv);
9752 + free_percpu(priv->ppriv);
9753 + fsl_mc_portal_free(priv->mc_io);
9754 + dev_set_drvdata(dev, NULL);
9755 + kmem_cache_destroy(qi_cache);
9760 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
9762 + struct dpaa2_fd fd;
9763 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
9764 + int err = 0, i, id;
9767 + return PTR_ERR(req);
9769 + if (priv->cscn_mem) {
9770 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
9773 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
9774 + dev_dbg_ratelimited(dev, "Dropping request\n");
9779 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
9781 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
9782 + DMA_BIDIRECTIONAL);
9783 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
9784 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
9788 + memset(&fd, 0, sizeof(fd));
9789 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
9790 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
9791 + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
9792 + dpaa2_fd_set_flc(&fd, req->flc_dma);
9795 + * There is no guarantee that preemption is disabled here,
9796 + * thus take action.
9798 + preempt_disable();
9799 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
9800 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
9801 + err = dpaa2_io_service_enqueue_fq(NULL,
9802 + priv->tx_queue_attr[id].fqid,
9804 + if (err != -EBUSY)
9809 + if (unlikely(err < 0)) {
9810 + dev_err(dev, "Error enqueuing frame: %d\n", err);
9814 + return -EINPROGRESS;
9817 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
9818 + DMA_BIDIRECTIONAL);
9821 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
9823 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
9825 + .vendor = FSL_MC_VENDOR_FREESCALE,
9826 + .obj_type = "dpseci",
9831 +static struct fsl_mc_driver dpaa2_caam_driver = {
9833 + .name = KBUILD_MODNAME,
9834 + .owner = THIS_MODULE,
9836 + .probe = dpaa2_caam_probe,
9837 + .remove = dpaa2_caam_remove,
9838 + .match_id_table = dpaa2_caam_match_id_table
9841 +MODULE_LICENSE("Dual BSD/GPL");
9842 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
9843 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
9845 +module_fsl_mc_driver(dpaa2_caam_driver);
9847 +++ b/drivers/crypto/caam/caamalg_qi2.h
9850 + * Copyright 2015-2016 Freescale Semiconductor Inc.
9851 + * Copyright 2017 NXP
9853 + * Redistribution and use in source and binary forms, with or without
9854 + * modification, are permitted provided that the following conditions are met:
9855 + * * Redistributions of source code must retain the above copyright
9856 + * notice, this list of conditions and the following disclaimer.
9857 + * * Redistributions in binary form must reproduce the above copyright
9858 + * notice, this list of conditions and the following disclaimer in the
9859 + * documentation and/or other materials provided with the distribution.
9860 + * * Neither the names of the above-listed copyright holders nor the
9861 + * names of any contributors may be used to endorse or promote products
9862 + * derived from this software without specific prior written permission.
9865 + * ALTERNATIVELY, this software may be distributed under the terms of the
9866 + * GNU General Public License ("GPL") as published by the Free Software
9867 + * Foundation, either version 2 of that License or (at your option) any
9870 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9871 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9872 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9873 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9874 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9875 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9876 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9877 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9878 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9879 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9880 + * POSSIBILITY OF SUCH DAMAGE.
9883 +#ifndef _CAAMALG_QI2_H_
9884 +#define _CAAMALG_QI2_H_
9886 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
9887 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
9888 +#include <linux/threads.h>
9889 +#include "dpseci.h"
9890 +#include "desc_constr.h"
9892 +#define DPAA2_CAAM_STORE_SIZE 16
9893 +/* NAPI weight *must* be a multiple of the store size. */
9894 +#define DPAA2_CAAM_NAPI_WEIGHT 64
9896 +/* The congestion entrance threshold was chosen so that on LS2088
9897 + * we support the maximum throughput for the available memory
9899 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
9900 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
9903 + * dpaa2_caam_priv - driver private data
9904 + * @dpseci_id: DPSECI object unique ID
9905 + * @major_ver: DPSECI major version
9906 + * @minor_ver: DPSECI minor version
9907 + * @dpseci_attr: DPSECI attributes
9908 + * @sec_attr: SEC engine attributes
9909 + * @rx_queue_attr: array of Rx queue attributes
9910 + * @tx_queue_attr: array of Tx queue attributes
9911 + * @cscn_mem: pointer to memory region containing the
9912 + * dpaa2_cscn struct; it's size is larger than
9913 + * sizeof(struct dpaa2_cscn) to accommodate alignment
9914 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
9915 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
9916 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
9917 + * @dev: device associated with the DPSECI object
9918 + * @mc_io: pointer to MC portal's I/O object
9919 + * @domain: IOMMU domain
9920 + * @ppriv: per CPU pointers to privata data
9922 +struct dpaa2_caam_priv {
9928 + struct dpseci_attr dpseci_attr;
9929 + struct dpseci_sec_attr sec_attr;
9930 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
9931 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
9936 + void *cscn_mem_aligned;
9937 + dma_addr_t cscn_dma;
9939 + struct device *dev;
9940 + struct fsl_mc_io *mc_io;
9941 + struct iommu_domain *domain;
9943 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
9947 + * dpaa2_caam_priv_per_cpu - per CPU private data
9948 + * @napi: napi structure
9949 + * @net_dev: netdev used by napi
9950 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
9951 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
9952 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
9953 + * @nctx: notification context of response FQ
9954 + * @store: where dequeued frames are stored
9955 + * @priv: backpointer to dpaa2_caam_priv
9957 +struct dpaa2_caam_priv_per_cpu {
9958 + struct napi_struct napi;
9959 + struct net_device net_dev;
9963 + struct dpaa2_io_notification_ctx nctx;
9964 + struct dpaa2_io_store *store;
9965 + struct dpaa2_caam_priv *priv;
9969 + * The CAAM QI hardware constructs a job descriptor which points
9970 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
9971 + * When the job descriptor is executed by deco, the whole job
9972 + * descriptor together with shared descriptor gets loaded in
9973 + * deco buffer which is 64 words long (each 32-bit).
9975 + * The job descriptor constructed by QI hardware has layout:
9978 + * Shdesc ptr (1 or 2 words)
9979 + * SEQ_OUT_PTR (1 word)
9980 + * Out ptr (1 or 2 words)
9981 + * Out length (1 word)
9982 + * SEQ_IN_PTR (1 word)
9983 + * In ptr (1 or 2 words)
9984 + * In length (1 word)
9986 + * The shdesc ptr is used to fetch shared descriptor contents
9987 + * into deco buffer.
9989 + * Apart from shdesc contents, the total number of words that
9990 + * get loaded in deco buffer are '8' or '11'. The remaining words
9991 + * in deco buffer can be used for storing shared descriptor.
9993 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
9995 +/* Length of a single buffer in the QI driver memory cache */
9996 +#define CAAM_QI_MEMCACHE_SIZE 512
9999 + * aead_edesc - s/w-extended aead descriptor
10000 + * @src_nents: number of segments in input scatterlist
10001 + * @dst_nents: number of segments in output scatterlist
10002 + * @iv_dma: dma address of iv for checking continuity and link table
10003 + * @qm_sg_bytes: length of dma mapped h/w link table
10004 + * @qm_sg_dma: bus physical mapped address of h/w link table
10005 + * @assoclen: associated data length, in CAAM endianness
10006 + * @assoclen_dma: bus physical mapped address of req->assoclen
10007 + * @sgt: the h/w link table, followed by IV
10009 +struct aead_edesc {
10012 + dma_addr_t iv_dma;
10014 + dma_addr_t qm_sg_dma;
10015 + unsigned int assoclen;
10016 + dma_addr_t assoclen_dma;
10017 + struct dpaa2_sg_entry sgt[0];
10021 + * tls_edesc - s/w-extended tls descriptor
10022 + * @src_nents: number of segments in input scatterlist
10023 + * @dst_nents: number of segments in output scatterlist
10024 + * @iv_dma: dma address of iv for checking continuity and link table
10025 + * @qm_sg_bytes: length of dma mapped h/w link table
10026 + * @qm_sg_dma: bus physical mapped address of h/w link table
10027 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
10028 + * @dst: pointer to output scatterlist, useful for unmapping
10029 + * @sgt: the h/w link table, followed by IV
10031 +struct tls_edesc {
10034 + dma_addr_t iv_dma;
10036 + dma_addr_t qm_sg_dma;
10037 + struct scatterlist tmp[2];
10038 + struct scatterlist *dst;
10039 + struct dpaa2_sg_entry sgt[0];
10043 + * skcipher_edesc - s/w-extended skcipher descriptor
10044 + * @src_nents: number of segments in input scatterlist
10045 + * @dst_nents: number of segments in output scatterlist
10046 + * @iv_dma: dma address of iv for checking continuity and link table
10047 + * @qm_sg_bytes: length of dma mapped qm_sg space
10048 + * @qm_sg_dma: I/O virtual address of h/w link table
10049 + * @sgt: the h/w link table, followed by IV
10051 +struct skcipher_edesc {
10054 + dma_addr_t iv_dma;
10056 + dma_addr_t qm_sg_dma;
10057 + struct dpaa2_sg_entry sgt[0];
10061 + * ahash_edesc - s/w-extended ahash descriptor
10062 + * @dst_dma: I/O virtual address of req->result
10063 + * @qm_sg_dma: I/O virtual address of h/w link table
10064 + * @src_nents: number of segments in input scatterlist
10065 + * @qm_sg_bytes: length of dma mapped qm_sg space
10066 + * @sgt: pointer to h/w link table
10068 +struct ahash_edesc {
10069 + dma_addr_t dst_dma;
10070 + dma_addr_t qm_sg_dma;
10073 + struct dpaa2_sg_entry sgt[0];
10077 + * caam_flc - Flow Context (FLC)
10078 + * @flc: Flow Context options
10079 + * @sh_desc: Shared Descriptor
10083 + u32 sh_desc[MAX_SDLEN];
10084 +} ____cacheline_aligned;
10093 + * caam_request - the request structure the driver application should fill while
10094 + * submitting a job to driver.
10095 + * @fd_flt: Frame list table defining input and output
10096 + * fd_flt[0] - FLE pointing to output buffer
10097 + * fd_flt[1] - FLE pointing to input buffer
10098 + * @fd_flt_dma: DMA address for the frame list table
10099 + * @flc: Flow Context
10100 + * @flc_dma: I/O virtual address of Flow Context
10101 + * @cbk: Callback function to invoke when job is completed
10102 + * @ctx: arbitrary context attached to the request by the application
10103 + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
10105 +struct caam_request {
10106 + struct dpaa2_fl_entry fd_flt[2];
10107 + dma_addr_t fd_flt_dma;
10108 + struct caam_flc *flc;
10109 + dma_addr_t flc_dma;
10110 + void (*cbk)(void *ctx, u32 err);
10116 + * dpaa2_caam_enqueue() - enqueue a crypto request
10117 + * @dev: device associated with the DPSECI object
10118 + * @req: pointer to caam_request
10120 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
10122 +#endif /* _CAAMALG_QI2_H_ */
10123 --- a/drivers/crypto/caam/caamhash.c
10124 +++ b/drivers/crypto/caam/caamhash.c
10127 #include "sg_sw_sec4.h"
10128 #include "key_gen.h"
10129 +#include "caamhash_desc.h"
10131 #define CAAM_CRA_PRIORITY 3000
10134 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
10135 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
10137 -/* length of descriptors text */
10138 -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
10139 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
10140 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
10141 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
10142 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
10143 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
10145 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
10146 CAAM_MAX_HASH_KEY_SIZE)
10147 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
10148 @@ -107,6 +100,7 @@ struct caam_hash_ctx {
10149 dma_addr_t sh_desc_update_first_dma;
10150 dma_addr_t sh_desc_fin_dma;
10151 dma_addr_t sh_desc_digest_dma;
10152 + enum dma_data_direction dir;
10153 struct device *jrdev;
10154 u8 key[CAAM_MAX_HASH_KEY_SIZE];
10156 @@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str
10159 /* Map state->caam_ctx, and add it to link table */
10160 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
10161 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
10162 struct caam_hash_state *state, int ctx_len,
10163 struct sec4_sg_entry *sec4_sg, u32 flag)
10165 @@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
10170 - * For ahash update, final and finup (import_ctx = true)
10171 - * import context, read and write to seqout
10172 - * For ahash firsts and digest (import_ctx = false)
10173 - * read and write to seqout
10175 -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
10176 - struct caam_hash_ctx *ctx, bool import_ctx)
10178 - u32 op = ctx->adata.algtype;
10179 - u32 *skip_key_load;
10181 - init_sh_desc(desc, HDR_SHARE_SERIAL);
10183 - /* Append key if it has been set; ahash update excluded */
10184 - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
10185 - /* Skip key loading if already shared */
10186 - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10189 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
10190 - ctx->adata.keylen, CLASS_2 |
10191 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
10193 - set_jump_tgt_here(desc, skip_key_load);
10195 - op |= OP_ALG_AAI_HMAC_PRECOMP;
10198 - /* If needed, import context from software */
10200 - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
10201 - LDST_SRCDST_BYTE_CONTEXT);
10203 - /* Class 2 operation */
10204 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
10207 - * Load from buf and/or src and write to req->result or state->context
10208 - * Calculate remaining bytes to read
10210 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10211 - /* Read remaining bytes */
10212 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
10213 - FIFOLD_TYPE_MSG | KEY_VLF);
10214 - /* Store class2 context bytes */
10215 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
10216 - LDST_SRCDST_BYTE_CONTEXT);
10219 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
10221 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
10222 int digestsize = crypto_ahash_digestsize(ahash);
10223 struct device *jrdev = ctx->jrdev;
10224 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
10227 + ctx->adata.key_virt = ctx->key;
10229 /* ahash_update shared descriptor */
10230 desc = ctx->sh_desc_update;
10231 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
10232 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
10233 + ctx->ctx_len, true, ctrlpriv->era);
10234 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
10235 - desc_bytes(desc), DMA_TO_DEVICE);
10236 + desc_bytes(desc), ctx->dir);
10238 print_hex_dump(KERN_ERR,
10239 "ahash update shdesc@"__stringify(__LINE__)": ",
10240 @@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp
10242 /* ahash_update_first shared descriptor */
10243 desc = ctx->sh_desc_update_first;
10244 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
10245 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
10246 + ctx->ctx_len, false, ctrlpriv->era);
10247 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
10248 - desc_bytes(desc), DMA_TO_DEVICE);
10249 + desc_bytes(desc), ctx->dir);
10251 print_hex_dump(KERN_ERR,
10252 "ahash update first shdesc@"__stringify(__LINE__)": ",
10253 @@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp
10255 /* ahash_final shared descriptor */
10256 desc = ctx->sh_desc_fin;
10257 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
10258 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
10259 + ctx->ctx_len, true, ctrlpriv->era);
10260 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
10261 - desc_bytes(desc), DMA_TO_DEVICE);
10262 + desc_bytes(desc), ctx->dir);
10264 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
10265 DUMP_PREFIX_ADDRESS, 16, 4, desc,
10266 @@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp
10268 /* ahash_digest shared descriptor */
10269 desc = ctx->sh_desc_digest;
10270 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
10271 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
10272 + ctx->ctx_len, false, ctrlpriv->era);
10273 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
10274 - desc_bytes(desc), DMA_TO_DEVICE);
10275 + desc_bytes(desc), ctx->dir);
10277 print_hex_dump(KERN_ERR,
10278 "ahash digest shdesc@"__stringify(__LINE__)": ",
10279 @@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah
10280 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
10281 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
10282 int digestsize = crypto_ahash_digestsize(ahash);
10283 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
10285 u8 *hashed_key = NULL;
10287 @@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah
10291 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
10292 - CAAM_MAX_HASH_KEY_SIZE);
10294 - goto bad_free_key;
10296 + * If DKP is supported, use it in the shared descriptor to generate
10299 + if (ctrlpriv->era >= 6) {
10300 + ctx->adata.key_inline = true;
10301 + ctx->adata.keylen = keylen;
10302 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
10303 + OP_ALG_ALGSEL_MASK);
10306 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
10307 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
10308 - ctx->adata.keylen_pad, 1);
10310 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
10311 + goto bad_free_key;
10313 + memcpy(ctx->key, key, keylen);
10315 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
10316 + keylen, CAAM_MAX_HASH_KEY_SIZE);
10318 + goto bad_free_key;
10322 return ahash_set_sh_desc(ahash);
10323 @@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash
10324 edesc->src_nents = src_nents;
10325 edesc->sec4_sg_bytes = sec4_sg_bytes;
10327 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
10328 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
10329 edesc->sec4_sg, DMA_BIDIRECTIONAL);
10332 @@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_
10333 desc = edesc->hw_desc;
10335 edesc->sec4_sg_bytes = sec4_sg_bytes;
10336 - edesc->src_nents = 0;
10338 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
10339 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
10340 edesc->sec4_sg, DMA_TO_DEVICE);
10343 @@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_
10345 edesc->src_nents = src_nents;
10347 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
10348 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
10349 edesc->sec4_sg, DMA_TO_DEVICE);
10352 @@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha
10353 dev_err(jrdev, "unable to map dst\n");
10356 - edesc->src_nents = 0;
10359 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
10360 @@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah
10362 edesc->src_nents = src_nents;
10363 edesc->sec4_sg_bytes = sec4_sg_bytes;
10364 - edesc->dst_dma = 0;
10366 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
10368 @@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha
10371 edesc->src_nents = src_nents;
10372 - edesc->dst_dma = 0;
10374 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
10376 @@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry
10378 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
10379 dma_addr_t dma_addr;
10380 + struct caam_drv_private *priv;
10383 * Get a Job ring from Job Ring driver to ensure in-order
10384 @@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry
10385 return PTR_ERR(ctx->jrdev);
10388 + priv = dev_get_drvdata(ctx->jrdev->parent);
10389 + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
10391 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
10392 offsetof(struct caam_hash_ctx,
10393 sh_desc_update_dma),
10394 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10395 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
10396 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10397 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
10398 caam_jr_free(ctx->jrdev);
10399 @@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr
10400 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
10401 offsetof(struct caam_hash_ctx,
10402 sh_desc_update_dma),
10403 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10404 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
10405 caam_jr_free(ctx->jrdev);
10409 +++ b/drivers/crypto/caam/caamhash_desc.c
10412 + * Shared descriptors for ahash algorithms
10414 + * Copyright 2017 NXP
10416 + * Redistribution and use in source and binary forms, with or without
10417 + * modification, are permitted provided that the following conditions are met:
10418 + * * Redistributions of source code must retain the above copyright
10419 + * notice, this list of conditions and the following disclaimer.
10420 + * * Redistributions in binary form must reproduce the above copyright
10421 + * notice, this list of conditions and the following disclaimer in the
10422 + * documentation and/or other materials provided with the distribution.
10423 + * * Neither the names of the above-listed copyright holders nor the
10424 + * names of any contributors may be used to endorse or promote products
10425 + * derived from this software without specific prior written permission.
10428 + * ALTERNATIVELY, this software may be distributed under the terms of the
10429 + * GNU General Public License ("GPL") as published by the Free Software
10430 + * Foundation, either version 2 of that License or (at your option) any
10433 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10434 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10435 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10436 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10437 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10438 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10439 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10440 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10441 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10442 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10443 + * POSSIBILITY OF SUCH DAMAGE.
10446 +#include "compat.h"
10447 +#include "desc_constr.h"
10448 +#include "caamhash_desc.h"
10451 + * cnstr_shdsc_ahash - ahash shared descriptor
10452 + * @desc: pointer to buffer used for descriptor construction
10453 + * @adata: pointer to authentication transform definitions.
10454 + * A split key is required for SEC Era < 6; the size of the split key
10455 + * is specified in this case.
10456 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
10457 + * SHA256, SHA384, SHA512}.
10458 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
10459 + * @digestsize: algorithm's digest size
10460 + * @ctx_len: size of Context Register
10461 + * @import_ctx: true if previous Context Register needs to be restored
10462 + * must be true for ahash update and final
10463 + *		must be false for ahash first and digest
10466 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
10467 + int digestsize, int ctx_len, bool import_ctx, int era)
10469 + u32 op = adata->algtype;
10471 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10473 + /* Append key if it has been set; ahash update excluded */
10474 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
10475 + u32 *skip_key_load;
10477 + /* Skip key loading if already shared */
10478 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10482 + append_key_as_imm(desc, adata->key_virt,
10483 + adata->keylen_pad,
10484 + adata->keylen, CLASS_2 |
10485 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10487 + append_proto_dkp(desc, adata);
10489 + set_jump_tgt_here(desc, skip_key_load);
10491 + op |= OP_ALG_AAI_HMAC_PRECOMP;
10494 + /* If needed, import context from software */
10496 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
10497 + LDST_SRCDST_BYTE_CONTEXT);
10499 + /* Class 2 operation */
10500 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
10503 + * Load from buf and/or src and write to req->result or state->context
10504 + * Calculate remaining bytes to read
10506 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10507 + /* Read remaining bytes */
10508 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
10509 + FIFOLD_TYPE_MSG | KEY_VLF);
10510 + /* Store class2 context bytes */
10511 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
10512 + LDST_SRCDST_BYTE_CONTEXT);
10514 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
10516 +MODULE_LICENSE("Dual BSD/GPL");
10517 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
10518 +MODULE_AUTHOR("NXP Semiconductors");
10520 +++ b/drivers/crypto/caam/caamhash_desc.h
10523 + * Shared descriptors for ahash algorithms
10525 + * Copyright 2017 NXP
10527 + * Redistribution and use in source and binary forms, with or without
10528 + * modification, are permitted provided that the following conditions are met:
10529 + * * Redistributions of source code must retain the above copyright
10530 + * notice, this list of conditions and the following disclaimer.
10531 + * * Redistributions in binary form must reproduce the above copyright
10532 + * notice, this list of conditions and the following disclaimer in the
10533 + * documentation and/or other materials provided with the distribution.
10534 + * * Neither the names of the above-listed copyright holders nor the
10535 + * names of any contributors may be used to endorse or promote products
10536 + * derived from this software without specific prior written permission.
10539 + * ALTERNATIVELY, this software may be distributed under the terms of the
10540 + * GNU General Public License ("GPL") as published by the Free Software
10541 + * Foundation, either version 2 of that License or (at your option) any
10544 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10545 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10546 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10547 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10548 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10549 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10550 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10551 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10552 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10553 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10554 + * POSSIBILITY OF SUCH DAMAGE.
10557 +#ifndef _CAAMHASH_DESC_H_
10558 +#define _CAAMHASH_DESC_H_
10560 +/* length of descriptors text */
10561 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
10562 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
10563 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
10564 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
10565 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
10567 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
10568 + int digestsize, int ctx_len, bool import_ctx, int era);
10570 +#endif /* _CAAMHASH_DESC_H_ */
10571 --- a/drivers/crypto/caam/compat.h
10572 +++ b/drivers/crypto/caam/compat.h
10574 #include <linux/of_platform.h>
10575 #include <linux/dma-mapping.h>
10576 #include <linux/io.h>
10577 +#include <linux/iommu.h>
10578 #include <linux/spinlock.h>
10579 #include <linux/rtnetlink.h>
10580 #include <linux/in.h>
10582 #include <crypto/authenc.h>
10583 #include <crypto/akcipher.h>
10584 #include <crypto/scatterwalk.h>
10585 +#include <crypto/skcipher.h>
10586 #include <crypto/internal/skcipher.h>
10587 #include <crypto/internal/hash.h>
10588 #include <crypto/internal/rsa.h>
10589 --- a/drivers/crypto/caam/ctrl.c
10590 +++ b/drivers/crypto/caam/ctrl.c
10591 @@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx);
10595 +static struct platform_device *caam_dma_dev;
10598 * i.MX targets tend to have clock control subsystems that can
10599 * enable/disable clocking to our device.
10600 @@ -332,6 +334,9 @@ static int caam_remove(struct platform_d
10601 debugfs_remove_recursive(ctrlpriv->dfs_root);
10604 + if (caam_dma_dev)
10605 + platform_device_unregister(caam_dma_dev);
10607 /* Unmap controller region */
10610 @@ -433,6 +438,10 @@ static int caam_probe(struct platform_de
10611 {.family = "Freescale i.MX"},
10614 + static struct platform_device_info caam_dma_pdev_info = {
10615 + .name = "caam-dma",
10616 + .id = PLATFORM_DEVID_NONE
10618 struct device *dev;
10619 struct device_node *nprop, *np;
10620 struct caam_ctrl __iomem *ctrl;
10621 @@ -615,6 +624,8 @@ static int caam_probe(struct platform_de
10625 + ctrlpriv->era = caam_get_era();
10627 ret = of_platform_populate(nprop, caam_match, NULL, dev);
10629 dev_err(dev, "JR platform devices creation error\n");
10630 @@ -671,6 +682,16 @@ static int caam_probe(struct platform_de
10634 + caam_dma_pdev_info.parent = dev;
10635 + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
10636 + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
10637 + if (IS_ERR(caam_dma_dev)) {
10638 + dev_err(dev, "Unable to create and register caam-dma dev\n");
10639 + caam_dma_dev = 0;
10641 + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
10644 cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
10647 @@ -746,7 +767,7 @@ static int caam_probe(struct platform_de
10649 /* Report "alive" for developer to see */
10650 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
10653 dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
10654 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
10655 caam_dpaa2 ? "yes" : "no");
10656 --- a/drivers/crypto/caam/desc.h
10657 +++ b/drivers/crypto/caam/desc.h
10659 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
10660 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
10661 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
10662 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
10663 #define CMD_STORE (0x0a << CMD_SHIFT)
10664 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
10665 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
10666 @@ -355,6 +356,7 @@
10667 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
10668 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
10669 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
10670 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
10672 /* Other types. Need to OR in last/flush bits as desired */
10673 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
10674 @@ -408,6 +410,7 @@
10675 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
10676 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
10677 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
10678 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
10679 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
10682 @@ -444,6 +447,18 @@
10683 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
10684 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
10685 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
10686 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
10687 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
10688 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
10689 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
10690 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
10691 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
10692 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
10693 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
10694 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
10695 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
10696 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
10697 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
10699 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
10700 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
10701 @@ -1093,6 +1108,22 @@
10702 /* MacSec protinfos */
10703 #define OP_PCL_MACSEC 0x0001
10705 +/* Derived Key Protocol (DKP) Protinfo */
10706 +#define OP_PCL_DKP_SRC_SHIFT 14
10707 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
10708 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
10709 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
10710 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
10711 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
10712 +#define OP_PCL_DKP_DST_SHIFT 12
10713 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
10714 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
10715 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
10716 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
10717 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
10718 +#define OP_PCL_DKP_KEY_SHIFT 0
10719 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
10721 /* PKI unidirectional protocol protinfo bits */
10722 #define OP_PCL_PKPROT_TEST 0x0008
10723 #define OP_PCL_PKPROT_DECRYPT 0x0004
10724 @@ -1440,10 +1471,11 @@
10725 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
10726 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
10727 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
10728 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
10729 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
10730 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
10731 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
10732 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
10733 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
10735 /* Destination selectors */
10736 #define MATH_DEST_SHIFT 8
10737 @@ -1452,6 +1484,7 @@
10738 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
10739 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
10740 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
10741 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
10742 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
10743 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
10744 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
10745 @@ -1624,4 +1657,31 @@
10746 /* Frame Descriptor Command for Replacement Job Descriptor */
10747 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
10749 +/* CHA Control Register bits */
10750 +#define CCTRL_RESET_CHA_ALL 0x1
10751 +#define CCTRL_RESET_CHA_AESA 0x2
10752 +#define CCTRL_RESET_CHA_DESA 0x4
10753 +#define CCTRL_RESET_CHA_AFHA 0x8
10754 +#define CCTRL_RESET_CHA_KFHA 0x10
10755 +#define CCTRL_RESET_CHA_SF8A 0x20
10756 +#define CCTRL_RESET_CHA_PKHA 0x40
10757 +#define CCTRL_RESET_CHA_MDHA 0x80
10758 +#define CCTRL_RESET_CHA_CRCA 0x100
10759 +#define CCTRL_RESET_CHA_RNG 0x200
10760 +#define CCTRL_RESET_CHA_SF9A 0x400
10761 +#define CCTRL_RESET_CHA_ZUCE 0x800
10762 +#define CCTRL_RESET_CHA_ZUCA 0x1000
10763 +#define CCTRL_UNLOAD_PK_A0 0x10000
10764 +#define CCTRL_UNLOAD_PK_A1 0x20000
10765 +#define CCTRL_UNLOAD_PK_A2 0x40000
10766 +#define CCTRL_UNLOAD_PK_A3 0x80000
10767 +#define CCTRL_UNLOAD_PK_B0 0x100000
10768 +#define CCTRL_UNLOAD_PK_B1 0x200000
10769 +#define CCTRL_UNLOAD_PK_B2 0x400000
10770 +#define CCTRL_UNLOAD_PK_B3 0x800000
10771 +#define CCTRL_UNLOAD_PK_N 0x1000000
10772 +#define CCTRL_UNLOAD_PK_A 0x4000000
10773 +#define CCTRL_UNLOAD_PK_B 0x8000000
10774 +#define CCTRL_UNLOAD_SBOX 0x10000000
10776 #endif /* DESC_H */
10777 --- a/drivers/crypto/caam/desc_constr.h
10778 +++ b/drivers/crypto/caam/desc_constr.h
10779 @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
10780 append_ptr(desc, ptr);
10783 -static inline void append_data(u32 * const desc, void *data, int len)
10784 +static inline void append_data(u32 * const desc, const void *data, int len)
10786 u32 *offset = desc_end(desc);
10788 @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
10789 append_cmd(desc, len);
10792 -static inline void append_cmd_data(u32 * const desc, void *data, int len,
10793 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
10796 append_cmd(desc, command | IMMEDIATE | len);
10797 @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co
10799 APPEND_CMD_RET(jump, JUMP)
10800 APPEND_CMD_RET(move, MOVE)
10801 +APPEND_CMD_RET(moveb, MOVEB)
10803 static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
10805 @@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
10806 APPEND_SEQ_PTR_INTLEN(out, OUT)
10808 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
10809 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
10810 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
10811 unsigned int len, u32 options) \
10814 @@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
10815 * from length of immediate data provided, e.g., split keys
10817 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
10818 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
10819 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
10820 unsigned int data_len, \
10821 unsigned int len, u32 options) \
10823 @@ -452,7 +453,7 @@ struct alginfo {
10824 unsigned int keylen_pad;
10826 dma_addr_t key_dma;
10828 + const void *key_virt;
10832 @@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi
10833 return (rem_bytes >= 0) ? 0 : -1;
10837 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
10838 + * @desc: pointer to buffer used for descriptor construction
10839 + * @adata: pointer to authentication transform definitions.
10840 + * keylen should be the length of initial key, while keylen_pad
10841 + * the length of the derived (split) key.
10842 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
10843 + * SHA256, SHA384, SHA512}.
10845 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
10850 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
10851 + * to OP_PCLID_DKP_{MD5, SHA*}
10853 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
10854 + (0x20 << OP_ALG_ALGSEL_SHIFT);
10856 + if (adata->key_inline) {
10859 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
10860 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
10862 + append_data(desc, adata->key_virt, adata->keylen);
10864 + /* Reserve space in descriptor buffer for the derived key */
10865 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
10866 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
10868 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
10870 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
10871 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
10873 + append_ptr(desc, adata->key_dma);
10877 #endif /* DESC_CONSTR_H */
10879 +++ b/drivers/crypto/caam/dpseci.c
10882 + * Copyright 2013-2016 Freescale Semiconductor Inc.
10883 + * Copyright 2017 NXP
10885 + * Redistribution and use in source and binary forms, with or without
10886 + * modification, are permitted provided that the following conditions are met:
10887 + * * Redistributions of source code must retain the above copyright
10888 + * notice, this list of conditions and the following disclaimer.
10889 + * * Redistributions in binary form must reproduce the above copyright
10890 + * notice, this list of conditions and the following disclaimer in the
10891 + * documentation and/or other materials provided with the distribution.
10892 + * * Neither the names of the above-listed copyright holders nor the
10893 + * names of any contributors may be used to endorse or promote products
10894 + * derived from this software without specific prior written permission.
10897 + * ALTERNATIVELY, this software may be distributed under the terms of the
10898 + * GNU General Public License ("GPL") as published by the Free Software
10899 + * Foundation, either version 2 of that License or (at your option) any
10902 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10903 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10904 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10905 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10906 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10907 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10908 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10909 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10910 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10911 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10912 + * POSSIBILITY OF SUCH DAMAGE.
10915 +#include <linux/fsl/mc.h>
10916 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
10917 +#include "dpseci.h"
10918 +#include "dpseci_cmd.h"
10921 + * dpseci_open() - Open a control session for the specified object
10922 + * @mc_io: Pointer to MC portal's I/O object
10923 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10924 + * @dpseci_id: DPSECI unique ID
10925 + * @token: Returned token; use in subsequent API calls
10927 + * This function can be used to open a control session for an already created
10928 + * object; an object may have been declared in the DPL or by calling the
10929 + * dpseci_create() function.
10930 + * This function returns a unique authentication token, associated with the
10931 + * specific object ID and the specific MC portal; this token must be used in all
10932 + * subsequent commands for this specific object.
10934 + * Return: '0' on success, error code otherwise
10936 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
10939 + struct fsl_mc_command cmd = { 0 };
10940 + struct dpseci_cmd_open *cmd_params;
10943 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
10946 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
10947 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
10948 + err = mc_send_command(mc_io, &cmd);
10952 + *token = mc_cmd_hdr_read_token(&cmd);
10958 + * dpseci_close() - Close the control session of the object
10959 + * @mc_io: Pointer to MC portal's I/O object
10960 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10961 + * @token: Token of DPSECI object
10963 + * After this function is called, no further operations are allowed on the
10964 + * object without opening a new control session.
10966 + * Return: '0' on success, error code otherwise
10968 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
10970 + struct fsl_mc_command cmd = { 0 };
10972 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
10975 + return mc_send_command(mc_io, &cmd);
10979 + * dpseci_create() - Create the DPSECI object
10980 + * @mc_io: Pointer to MC portal's I/O object
10981 + * @dprc_token: Parent container token; '0' for default container
10982 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10983 + * @cfg: Configuration structure
10984 + * @obj_id: returned object id
10986 + * Create the DPSECI object, allocate required resources and perform required
10987 + * initialization.
10989 + * The object can be created either by declaring it in the DPL file, or by
10990 + * calling this function.
10992 + * The function accepts an authentication token of a parent container that this
10993 + * object should be assigned to. The token can be '0' so the object will be
10994 + * assigned to the default container.
10995 + * The newly created object can be opened with the returned object id and using
10996 + * the container's associated tokens and MC portals.
10998 + * Return: '0' on success, error code otherwise
11000 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
11001 + const struct dpseci_cfg *cfg, u32 *obj_id)
11003 + struct fsl_mc_command cmd = { 0 };
11004 + struct dpseci_cmd_create *cmd_params;
11007 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
11010 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
11011 + for (i = 0; i < 8; i++)
11012 + cmd_params->priorities[i] = cfg->priorities[i];
11013 + for (i = 0; i < 8; i++)
11014 + cmd_params->priorities2[i] = cfg->priorities[8 + i];
11015 + cmd_params->num_tx_queues = cfg->num_tx_queues;
11016 + cmd_params->num_rx_queues = cfg->num_rx_queues;
11017 + cmd_params->options = cpu_to_le32(cfg->options);
11018 + err = mc_send_command(mc_io, &cmd);
11022 + *obj_id = mc_cmd_read_object_id(&cmd);
11028 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
11029 + * @mc_io: Pointer to MC portal's I/O object
11030 + * @dprc_token: Parent container token; '0' for default container
11031 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11032 + * @object_id: The object id; it must be a valid id within the container that
11033 + * created this object
11035 + * The function accepts the authentication token of the parent container that
11036 + * created the object (not the one that currently owns the object). The object
11037 + * is searched within parent using the provided 'object_id'.
11038 + * All tokens to the object must be closed before calling destroy.
11040 + * Return: '0' on success, error code otherwise
11042 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
11045 + struct fsl_mc_command cmd = { 0 };
11046 + struct dpseci_cmd_destroy *cmd_params;
11048 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
11051 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
11052 + cmd_params->object_id = cpu_to_le32(object_id);
11054 + return mc_send_command(mc_io, &cmd);
11058 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
11059 + * @mc_io: Pointer to MC portal's I/O object
11060 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11061 + * @token: Token of DPSECI object
11063 + * Return: '0' on success, error code otherwise
11065 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
11067 + struct fsl_mc_command cmd = { 0 };
11069 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
11072 + return mc_send_command(mc_io, &cmd);
11076 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
11077 + * @mc_io: Pointer to MC portal's I/O object
11078 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11079 + * @token: Token of DPSECI object
11081 + * Return: '0' on success, error code otherwise
11083 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
11085 + struct fsl_mc_command cmd = { 0 };
11087 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
11091 + return mc_send_command(mc_io, &cmd);
11095 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
11096 + * @mc_io: Pointer to MC portal's I/O object
11097 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11098 + * @token: Token of DPSECI object
11099 + * @en: Returns '1' if object is enabled; '0' otherwise
11101 + * Return: '0' on success, error code otherwise
11103 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11106 + struct fsl_mc_command cmd = { 0 };
11107 + struct dpseci_rsp_is_enabled *rsp_params;
11110 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
11113 + err = mc_send_command(mc_io, &cmd);
11117 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
11118 + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
11124 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
11125 + * @mc_io: Pointer to MC portal's I/O object
11126 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11127 + * @token: Token of DPSECI object
11129 + * Return: '0' on success, error code otherwise
11131 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
11133 + struct fsl_mc_command cmd = { 0 };
11135 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
11139 + return mc_send_command(mc_io, &cmd);
11143 + * dpseci_get_irq_enable() - Get overall interrupt state
11144 + * @mc_io: Pointer to MC portal's I/O object
11145 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11146 + * @token: Token of DPSECI object
11147 + * @irq_index: The interrupt index to configure
11148 + * @en: Returned Interrupt state - enable = 1, disable = 0
11150 + * Return: '0' on success, error code otherwise
11152 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11153 + u8 irq_index, u8 *en)
11155 + struct fsl_mc_command cmd = { 0 };
11156 + struct dpseci_cmd_irq_enable *cmd_params;
11157 + struct dpseci_rsp_get_irq_enable *rsp_params;
11160 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
11163 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
11164 + cmd_params->irq_index = irq_index;
11165 + err = mc_send_command(mc_io, &cmd);
11169 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
11170 + *en = rsp_params->enable_state;
11176 + * dpseci_set_irq_enable() - Set overall interrupt state.
11177 + * @mc_io: Pointer to MC portal's I/O object
11178 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11179 + * @token: Token of DPSECI object
11180 + * @irq_index: The interrupt index to configure
11181 + * @en: Interrupt state - enable = 1, disable = 0
11183 + * Allows GPP software to control when interrupts are generated.
11184 + * Each interrupt can have up to 32 causes. The enable/disable controls the
11185 + * overall interrupt state. If the interrupt is disabled no causes will cause
11188 + * Return: '0' on success, error code otherwise
11190 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11191 + u8 irq_index, u8 en)
11193 + struct fsl_mc_command cmd = { 0 };
11194 + struct dpseci_cmd_irq_enable *cmd_params;
11196 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
11199 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
11200 + cmd_params->irq_index = irq_index;
11201 + cmd_params->enable_state = en;
11203 + return mc_send_command(mc_io, &cmd);
11207 + * dpseci_get_irq_mask() - Get interrupt mask.
11208 + * @mc_io: Pointer to MC portal's I/O object
11209 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11210 + * @token: Token of DPSECI object
11211 + * @irq_index: The interrupt index to configure
11212 + * @mask: Returned event mask to trigger interrupt
11214 + * Every interrupt can have up to 32 causes and the interrupt model supports
11215 + * masking/unmasking each cause independently.
11217 + * Return: '0' on success, error code otherwise
11219 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11220 + u8 irq_index, u32 *mask)
11222 + struct fsl_mc_command cmd = { 0 };
11223 + struct dpseci_cmd_irq_mask *cmd_params;
11226 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
11229 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
11230 + cmd_params->irq_index = irq_index;
11231 + err = mc_send_command(mc_io, &cmd);
11235 + *mask = le32_to_cpu(cmd_params->mask);
11241 + * dpseci_set_irq_mask() - Set interrupt mask.
11242 + * @mc_io: Pointer to MC portal's I/O object
11243 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11244 + * @token: Token of DPSECI object
11245 + * @irq_index: The interrupt index to configure
11246 + * @mask: event mask to trigger interrupt;
11248 + * 0 = ignore event
11249 + * 1 = consider event for asserting IRQ
11251 + * Every interrupt can have up to 32 causes and the interrupt model supports
11252 + * masking/unmasking each cause independently
11254 + * Return: '0' on success, error code otherwise
11256 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11257 + u8 irq_index, u32 mask)
11259 + struct fsl_mc_command cmd = { 0 };
11260 + struct dpseci_cmd_irq_mask *cmd_params;
11262 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
11265 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
11266 + cmd_params->mask = cpu_to_le32(mask);
11267 + cmd_params->irq_index = irq_index;
11269 + return mc_send_command(mc_io, &cmd);
11273 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
11274 + * @mc_io: Pointer to MC portal's I/O object
11275 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11276 + * @token: Token of DPSECI object
11277 + * @irq_index: The interrupt index to configure
11278 + * @status: Returned interrupts status - one bit per cause:
11279 + * 0 = no interrupt pending
11280 + * 1 = interrupt pending
11282 + * Return: '0' on success, error code otherwise
11284 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11285 + u8 irq_index, u32 *status)
11287 + struct fsl_mc_command cmd = { 0 };
11288 + struct dpseci_cmd_irq_status *cmd_params;
11291 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
11294 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
11295 + cmd_params->status = cpu_to_le32(*status);
11296 + cmd_params->irq_index = irq_index;
11297 + err = mc_send_command(mc_io, &cmd);
11301 + *status = le32_to_cpu(cmd_params->status);
11307 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
11308 + * @mc_io: Pointer to MC portal's I/O object
11309 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11310 + * @token: Token of DPSECI object
11311 + * @irq_index: The interrupt index to configure
11312 + * @status: bits to clear (W1C) - one bit per cause:
11313 + * 0 = don't change
11314 + * 1 = clear status bit
11316 + * Return: '0' on success, error code otherwise
11318 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11319 + u8 irq_index, u32 status)
11321 + struct fsl_mc_command cmd = { 0 };
11322 + struct dpseci_cmd_irq_status *cmd_params;
11324 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
11327 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
11328 + cmd_params->status = cpu_to_le32(status);
11329 + cmd_params->irq_index = irq_index;
11331 + return mc_send_command(mc_io, &cmd);
11335 + * dpseci_get_attributes() - Retrieve DPSECI attributes
11336 + * @mc_io: Pointer to MC portal's I/O object
11337 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11338 + * @token: Token of DPSECI object
11339 + * @attr: Returned object's attributes
11341 + * Return: '0' on success, error code otherwise
11343 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11344 + struct dpseci_attr *attr)
11346 + struct fsl_mc_command cmd = { 0 };
11347 + struct dpseci_rsp_get_attributes *rsp_params;
11350 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
11353 + err = mc_send_command(mc_io, &cmd);
11357 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
11358 + attr->id = le32_to_cpu(rsp_params->id);
11359 + attr->num_tx_queues = rsp_params->num_tx_queues;
11360 + attr->num_rx_queues = rsp_params->num_rx_queues;
11361 + attr->options = le32_to_cpu(rsp_params->options);
11367 + * dpseci_set_rx_queue() - Set Rx queue configuration
11368 + * @mc_io: Pointer to MC portal's I/O object
11369 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11370 + * @token: Token of DPSECI object
11371 + * @queue: Select the queue relative to number of priorities configured at
11372 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
11373 + * Rx queues identically.
11374 + * @cfg: Rx queue configuration
11376 + * Return: '0' on success, error code otherwise
11378 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11379 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
11381 + struct fsl_mc_command cmd = { 0 };
11382 + struct dpseci_cmd_queue *cmd_params;
11384 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
11387 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
11388 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
11389 + cmd_params->priority = cfg->dest_cfg.priority;
11390 + cmd_params->queue = queue;
11391 + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
11392 + cfg->dest_cfg.dest_type);
11393 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
11394 + cmd_params->options = cpu_to_le32(cfg->options);
11395 + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
11396 + cfg->order_preservation_en);
11398 + return mc_send_command(mc_io, &cmd);
11402 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
11403 + * @mc_io: Pointer to MC portal's I/O object
11404 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11405 + * @token: Token of DPSECI object
11406 + * @queue: Select the queue relative to number of priorities configured at
11407 + * DPSECI creation
11408 + * @attr: Returned Rx queue attributes
11410 + * Return: '0' on success, error code otherwise
11412 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11413 + u8 queue, struct dpseci_rx_queue_attr *attr)
11415 + struct fsl_mc_command cmd = { 0 };
11416 + struct dpseci_cmd_queue *cmd_params;
11419 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
11422 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
11423 + cmd_params->queue = queue;
11424 + err = mc_send_command(mc_io, &cmd);
11428 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
11429 + attr->dest_cfg.priority = cmd_params->priority;
11430 + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
11432 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
11433 + attr->fqid = le32_to_cpu(cmd_params->fqid);
11434 + attr->order_preservation_en =
11435 + dpseci_get_field(cmd_params->order_preservation_en,
11436 + ORDER_PRESERVATION);
11442 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
11443 + * @mc_io: Pointer to MC portal's I/O object
11444 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11445 + * @token: Token of DPSECI object
11446 + * @queue: Select the queue relative to number of priorities configured at
11447 + * DPSECI creation
11448 + * @attr: Returned Tx queue attributes
11450 + * Return: '0' on success, error code otherwise
11452 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11453 + u8 queue, struct dpseci_tx_queue_attr *attr)
11455 + struct fsl_mc_command cmd = { 0 };
11456 + struct dpseci_cmd_queue *cmd_params;
11457 + struct dpseci_rsp_get_tx_queue *rsp_params;
11460 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
11463 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
11464 + cmd_params->queue = queue;
11465 + err = mc_send_command(mc_io, &cmd);
11469 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
11470 + attr->fqid = le32_to_cpu(rsp_params->fqid);
11471 + attr->priority = rsp_params->priority;
11477 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
11478 + * @mc_io: Pointer to MC portal's I/O object
11479 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11480 + * @token: Token of DPSECI object
11481 + * @attr: Returned SEC attributes
11483 + * Return: '0' on success, error code otherwise
11485 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11486 + struct dpseci_sec_attr *attr)
11488 + struct fsl_mc_command cmd = { 0 };
11489 + struct dpseci_rsp_get_sec_attr *rsp_params;
11492 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
11495 + err = mc_send_command(mc_io, &cmd);
11499 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
11500 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
11501 + attr->major_rev = rsp_params->major_rev;
11502 + attr->minor_rev = rsp_params->minor_rev;
11503 + attr->era = rsp_params->era;
11504 + attr->deco_num = rsp_params->deco_num;
11505 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
11506 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
11507 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
11508 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
11509 + attr->crc_acc_num = rsp_params->crc_acc_num;
11510 + attr->pk_acc_num = rsp_params->pk_acc_num;
11511 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
11512 + attr->rng_acc_num = rsp_params->rng_acc_num;
11513 + attr->md_acc_num = rsp_params->md_acc_num;
11514 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
11515 + attr->des_acc_num = rsp_params->des_acc_num;
11516 + attr->aes_acc_num = rsp_params->aes_acc_num;
11517 + attr->ccha_acc_num = rsp_params->ccha_acc_num;
11518 + attr->ptha_acc_num = rsp_params->ptha_acc_num;
11524 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
11525 + * @mc_io: Pointer to MC portal's I/O object
11526 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11527 + * @token: Token of DPSECI object
11528 + * @counters: Returned SEC counters
11530 + * Return: '0' on success, error code otherwise
11532 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11533 + struct dpseci_sec_counters *counters)
11535 + struct fsl_mc_command cmd = { 0 };
11536 + struct dpseci_rsp_get_sec_counters *rsp_params;
11539 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
11542 + err = mc_send_command(mc_io, &cmd);
11546 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
11547 + counters->dequeued_requests =
11548 + le64_to_cpu(rsp_params->dequeued_requests);
11549 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
11550 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
11551 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
11552 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
11553 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
11554 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
11560 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
11561 + * @mc_io: Pointer to MC portal's I/O object
11562 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11563 + * @major_ver: Major version of data path sec API
11564 + * @minor_ver: Minor version of data path sec API
11566 + * Return: '0' on success, error code otherwise
11568 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
11569 + u16 *major_ver, u16 *minor_ver)
11571 + struct fsl_mc_command cmd = { 0 };
11572 + struct dpseci_rsp_get_api_version *rsp_params;
11575 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
11577 + err = mc_send_command(mc_io, &cmd);
11581 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
11582 + *major_ver = le16_to_cpu(rsp_params->major);
11583 + *minor_ver = le16_to_cpu(rsp_params->minor);
11589 + * dpseci_set_opr() - Set Order Restoration configuration
11590 + * @mc_io: Pointer to MC portal's I/O object
11591 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11592 + * @token: Token of DPSECI object
11593 + * @index: The queue index
11594 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
11596 + * @cfg: Configuration options for the OPR
11598 + * Return: '0' on success, error code otherwise
11600 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
11601 + u8 options, struct opr_cfg *cfg)
11603 + struct fsl_mc_command cmd = { 0 };
11604 + struct dpseci_cmd_opr *cmd_params;
11606 + cmd.header = mc_encode_cmd_header(
11607 + DPSECI_CMDID_SET_OPR,
11610 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
11611 + cmd_params->index = index;
11612 + cmd_params->options = options;
11613 + cmd_params->oloe = cfg->oloe;
11614 + cmd_params->oeane = cfg->oeane;
11615 + cmd_params->olws = cfg->olws;
11616 + cmd_params->oa = cfg->oa;
11617 + cmd_params->oprrws = cfg->oprrws;
11619 + return mc_send_command(mc_io, &cmd);
11623 + * dpseci_get_opr() - Retrieve Order Restoration config and query
11624 + * @mc_io: Pointer to MC portal's I/O object
11625 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11626 + * @token: Token of DPSECI object
11627 + * @index: The queue index
11628 + * @cfg: Returned OPR configuration
11629 + * @qry: Returned OPR query
11631 + * Return: '0' on success, error code otherwise
11633 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
11634 + struct opr_cfg *cfg, struct opr_qry *qry)
11636 + struct fsl_mc_command cmd = { 0 };
11637 + struct dpseci_cmd_opr *cmd_params;
11638 + struct dpseci_rsp_get_opr *rsp_params;
11641 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
11644 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
11645 + cmd_params->index = index;
11646 + err = mc_send_command(mc_io, &cmd);
11650 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
11651 + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
11652 + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
11653 + cfg->oloe = rsp_params->oloe;
11654 + cfg->oeane = rsp_params->oeane;
11655 + cfg->olws = rsp_params->olws;
11656 + cfg->oa = rsp_params->oa;
11657 + cfg->oprrws = rsp_params->oprrws;
11658 + qry->nesn = le16_to_cpu(rsp_params->nesn);
11659 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
11660 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
11661 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
11662 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
11663 + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
11664 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
11665 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
11666 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
11667 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
11673 + * dpseci_set_congestion_notification() - Set congestion group
11674 + * notification configuration
11675 + * @mc_io: Pointer to MC portal's I/O object
11676 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11677 + * @token: Token of DPSECI object
11678 + * @cfg: congestion notification configuration
11680 + * Return: '0' on success, error code otherwise
11682 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
11683 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
11685 + struct fsl_mc_command cmd = { 0 };
11686 + struct dpseci_cmd_congestion_notification *cmd_params;
11688 + cmd.header = mc_encode_cmd_header(
11689 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
11692 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
11693 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
11694 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
11695 + cmd_params->priority = cfg->dest_cfg.priority;
11696 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
11697 + cfg->dest_cfg.dest_type);
11698 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
11699 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
11700 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
11701 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
11702 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
11704 + return mc_send_command(mc_io, &cmd);
11708 + * dpseci_get_congestion_notification() - Get congestion group notification
11710 + * @mc_io: Pointer to MC portal's I/O object
11711 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11712 + * @token: Token of DPSECI object
11713 + * @cfg: congestion notification configuration
11715 + * Return: '0' on success, error code otherwise
11717 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
11718 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
11720 + struct fsl_mc_command cmd = { 0 };
11721 + struct dpseci_cmd_congestion_notification *rsp_params;
11724 + cmd.header = mc_encode_cmd_header(
11725 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
11728 + err = mc_send_command(mc_io, &cmd);
11732 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
11733 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
11734 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
11735 + cfg->dest_cfg.priority = rsp_params->priority;
11736 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
11738 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
11739 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
11740 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
11741 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
11742 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
11747 +++ b/drivers/crypto/caam/dpseci.h
11750 + * Copyright 2013-2016 Freescale Semiconductor Inc.
11751 + * Copyright 2017 NXP
11753 + * Redistribution and use in source and binary forms, with or without
11754 + * modification, are permitted provided that the following conditions are met:
11755 + * * Redistributions of source code must retain the above copyright
11756 + * notice, this list of conditions and the following disclaimer.
11757 + * * Redistributions in binary form must reproduce the above copyright
11758 + * notice, this list of conditions and the following disclaimer in the
11759 + * documentation and/or other materials provided with the distribution.
11760 + * * Neither the names of the above-listed copyright holders nor the
11761 + * names of any contributors may be used to endorse or promote products
11762 + * derived from this software without specific prior written permission.
11765 + * ALTERNATIVELY, this software may be distributed under the terms of the
11766 + * GNU General Public License ("GPL") as published by the Free Software
11767 + * Foundation, either version 2 of that License or (at your option) any
11770 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11771 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11772 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11773 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11774 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11775 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11776 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11777 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11778 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11779 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11780 + * POSSIBILITY OF SUCH DAMAGE.
11782 +#ifndef _DPSECI_H_
11783 +#define _DPSECI_H_
11786 + * Data Path SEC Interface API
11787 + * Contains initialization APIs and runtime control APIs for DPSECI
11795 + * General DPSECI macros
11799 + * Maximum number of Tx/Rx queues per DPSECI object
11801 +#define DPSECI_MAX_QUEUE_NUM 16
11804 + * All queues considered; see dpseci_set_rx_queue()
11806 +#define DPSECI_ALL_QUEUES (u8)(-1)
11808 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
11811 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
11814 + * Enable the Congestion Group support
11816 +#define DPSECI_OPT_HAS_CG 0x000020
11819 + * Enable the Order Restoration support
11821 +#define DPSECI_OPT_HAS_OPR 0x000040
11824 + * Order Point Records are shared for the entire DPSECI
11826 +#define DPSECI_OPT_OPR_SHARED 0x000080
11829 + * struct dpseci_cfg - Structure representing DPSECI configuration
11830 + * @options: Any combination of the following options:
11831 + * DPSECI_OPT_HAS_CG
11832 + * DPSECI_OPT_HAS_OPR
11833 + * DPSECI_OPT_OPR_SHARED
11834 + * @num_tx_queues: num of queues towards the SEC
11835 + * @num_rx_queues: num of queues back from the SEC
11836 + * @priorities: Priorities for the SEC hardware processing;
11837 + * each place in the array is the priority of the tx queue
11838 + * towards the SEC;
11839 + * valid priorities are configured with values 1-8;
11841 +struct dpseci_cfg {
11843 + u8 num_tx_queues;
11844 + u8 num_rx_queues;
11845 + u8 priorities[DPSECI_MAX_QUEUE_NUM];
11848 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
11849 + const struct dpseci_cfg *cfg, u32 *obj_id);
11851 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
11854 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
11856 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
11858 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11861 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
11863 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11864 + u8 irq_index, u8 *en);
11866 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11867 + u8 irq_index, u8 en);
11869 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11870 + u8 irq_index, u32 *mask);
11872 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11873 + u8 irq_index, u32 mask);
11875 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11876 + u8 irq_index, u32 *status);
11878 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11879 + u8 irq_index, u32 status);
11882 + * struct dpseci_attr - Structure representing DPSECI attributes
11883 + * @id: DPSECI object ID
11884 + * @num_tx_queues: number of queues towards the SEC
11885 + * @num_rx_queues: number of queues back from the SEC
11886 + * @options: any combination of the following options:
11887 + * DPSECI_OPT_HAS_CG
11888 + * DPSECI_OPT_HAS_OPR
11889 + * DPSECI_OPT_OPR_SHARED
11891 +struct dpseci_attr {
11893 + u8 num_tx_queues;
11894 + u8 num_rx_queues;
11898 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11899 + struct dpseci_attr *attr);
11902 + * enum dpseci_dest - DPSECI destination types
11903 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
11904 + * and does not generate FQDAN notifications; user is expected to dequeue
11905 + * from the queue based on polling or other user-defined method
11906 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
11907 + * notifications to the specified DPIO; user is expected to dequeue from
11908 + * the queue only after notification is received
11909 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
11910 + * FQDAN notifications, but is connected to the specified DPCON object;
11911 + * user is expected to dequeue from the DPCON channel
11913 +enum dpseci_dest {
11914 + DPSECI_DEST_NONE = 0,
11915 + DPSECI_DEST_DPIO,
11916 + DPSECI_DEST_DPCON
11920 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
11921 + * @dest_type: Destination type
11922 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
11923 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
11924 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
11925 + * not relevant for 'DPSECI_DEST_NONE' option
11927 +struct dpseci_dest_cfg {
11928 + enum dpseci_dest dest_type;
11934 + * DPSECI queue modification options
11938 + * Select to modify the user's context associated with the queue
11940 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
11943 + * Select to modify the queue's destination
11945 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
11948 + * Select to modify the queue's order preservation
11950 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
11953 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
11954 + * @options: Flags representing the suggested modifications to the queue;
11955 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
11956 + * @order_preservation_en: order preservation configuration for the rx queue
11957 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
11958 + * @user_ctx: User context value provided in the frame descriptor of each
11959 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
11961 + * @dest_cfg: Queue destination parameters; valid only if
11962 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
11964 +struct dpseci_rx_queue_cfg {
11966 + int order_preservation_en;
11968 + struct dpseci_dest_cfg dest_cfg;
11971 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11972 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
11975 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
11976 + * @user_ctx: User context value provided in the frame descriptor of each
11978 + * @order_preservation_en: Status of the order preservation configuration on the
11980 + * @dest_cfg: Queue destination configuration
11981 + * @fqid: Virtual FQID value to be used for dequeue operations
11983 +struct dpseci_rx_queue_attr {
11985 + int order_preservation_en;
11986 + struct dpseci_dest_cfg dest_cfg;
11990 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
11991 + u8 queue, struct dpseci_rx_queue_attr *attr);
11994 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
11995 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
11996 + * @priority: SEC hardware processing priority for the queue
11998 +struct dpseci_tx_queue_attr {
12003 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12004 + u8 queue, struct dpseci_tx_queue_attr *attr);
12007 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
12008 + * hardware accelerator
12009 + * @ip_id: ID for SEC
12010 + * @major_rev: Major revision number for SEC
12011 + * @minor_rev: Minor revision number for SEC
12013 + * @deco_num: The number of copies of the DECO that are implemented in this
12015 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
12017 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
12019 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
12020 + * implemented in this version of SEC
12021 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
12022 + * implemented in this version of SEC
12023 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
12024 + * this version of SEC
12025 + * @pk_acc_num: The number of copies of the Public Key module that are
12026 + * implemented in this version of SEC
12027 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
12028 + * implemented in this version of SEC
12029 + * @rng_acc_num: The number of copies of the Random Number Generator that are
12030 + * implemented in this version of SEC
12031 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
12032 + * implemented in this version of SEC
12033 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
12034 + * in this version of SEC
12035 + * @des_acc_num: The number of copies of the DES module that are implemented in
12036 + * this version of SEC
12037 + * @aes_acc_num: The number of copies of the AES module that are implemented in
12038 + * this version of SEC
12039 + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
12040 + * implemented in this version of SEC.
12041 + * @ptha_acc_num: The number of copies of the Poly1305 module that are
12042 + * implemented in this version of SEC.
12044 +struct dpseci_sec_attr {
12050 + u8 zuc_auth_acc_num;
12051 + u8 zuc_enc_acc_num;
12052 + u8 snow_f8_acc_num;
12053 + u8 snow_f9_acc_num;
12056 + u8 kasumi_acc_num;
12066 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12067 + struct dpseci_sec_attr *attr);
12070 + * struct dpseci_sec_counters - Structure representing global SEC counters and
12071 + * not per dpseci counters
12072 + * @dequeued_requests: Number of Requests Dequeued
12073 + * @ob_enc_requests: Number of Outbound Encrypt Requests
12074 + * @ib_dec_requests: Number of Inbound Decrypt Requests
12075 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
12076 + * @ob_prot_bytes: Number of Outbound Bytes Protected
12077 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
12078 + * @ib_valid_bytes: Number of Inbound Bytes Validated
12080 +struct dpseci_sec_counters {
12081 + u64 dequeued_requests;
12082 + u64 ob_enc_requests;
12083 + u64 ib_dec_requests;
12084 + u64 ob_enc_bytes;
12085 + u64 ob_prot_bytes;
12086 + u64 ib_dec_bytes;
12087 + u64 ib_valid_bytes;
12090 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12091 + struct dpseci_sec_counters *counters);
12093 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
12094 + u16 *major_ver, u16 *minor_ver);
12096 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
12097 + u8 options, struct opr_cfg *cfg);
12099 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
12100 + struct opr_cfg *cfg, struct opr_qry *qry);
12103 + * enum dpseci_congestion_unit - DPSECI congestion units
12104 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
12105 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
12107 +enum dpseci_congestion_unit {
12108 + DPSECI_CONGESTION_UNIT_BYTES = 0,
12109 + DPSECI_CONGESTION_UNIT_FRAMES
12113 + * CSCN message is written to message_iova once entering a
12114 + * congestion state (see 'threshold_entry')
12116 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
12119 + * CSCN message is written to message_iova once exiting a
12120 + * congestion state (see 'threshold_exit')
12122 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
12125 + * CSCN write will attempt to allocate into a cache (coherent write);
12126 + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
12128 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
12131 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
12132 + * DPIO/DPCON's WQ channel once entering a congestion state
12133 + * (see 'threshold_entry')
12135 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
12138 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
12139 + * DPIO/DPCON's WQ channel once exiting a congestion state
12140 + * (see 'threshold_exit')
12142 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
12145 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
12146 + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
12149 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
12152 + * struct dpseci_congestion_notification_cfg - congestion notification
12154 + * @units: units type
12155 + * @threshold_entry: above this threshold we enter a congestion state.
12156 + * set it to '0' to disable it
12157 + * @threshold_exit: below this threshold we exit the congestion state.
12158 + * @message_ctx: The context that will be part of the CSCN message
12159 + * @message_iova: I/O virtual address (must be in DMA-able memory),
12160 + * must be 16B aligned;
12161 + * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
12162 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
12165 +struct dpseci_congestion_notification_cfg {
12166 + enum dpseci_congestion_unit units;
12167 + u32 threshold_entry;
12168 + u32 threshold_exit;
12170 + u64 message_iova;
12171 + struct dpseci_dest_cfg dest_cfg;
12172 + u16 notification_mode;
12175 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
12176 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
12178 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
12179 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
12181 +#endif /* _DPSECI_H_ */
12183 +++ b/drivers/crypto/caam/dpseci_cmd.h
12186 + * Copyright 2013-2016 Freescale Semiconductor Inc.
12187 + * Copyright 2017 NXP
12189 + * Redistribution and use in source and binary forms, with or without
12190 + * modification, are permitted provided that the following conditions are met:
12191 + * * Redistributions of source code must retain the above copyright
12192 + * notice, this list of conditions and the following disclaimer.
12193 + * * Redistributions in binary form must reproduce the above copyright
12194 + * notice, this list of conditions and the following disclaimer in the
12195 + * documentation and/or other materials provided with the distribution.
12196 + * * Neither the names of the above-listed copyright holders nor the
12197 + * names of any contributors may be used to endorse or promote products
12198 + * derived from this software without specific prior written permission.
12201 + * ALTERNATIVELY, this software may be distributed under the terms of the
12202 + * GNU General Public License ("GPL") as published by the Free Software
12203 + * Foundation, either version 2 of that License or (at your option) any
12206 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12207 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12208 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12209 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12210 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12211 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12212 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12213 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12214 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12215 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12216 + * POSSIBILITY OF SUCH DAMAGE.
12219 +#ifndef _DPSECI_CMD_H_
12220 +#define _DPSECI_CMD_H_
12222 +/* DPSECI Version */
12223 +#define DPSECI_VER_MAJOR 5
12224 +#define DPSECI_VER_MINOR 3
12226 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
12227 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
12229 +/* Command versioning */
12230 +#define DPSECI_CMD_BASE_VERSION 1
12231 +#define DPSECI_CMD_BASE_VERSION_V2 2
12232 +#define DPSECI_CMD_BASE_VERSION_V3 3
12233 +#define DPSECI_CMD_ID_OFFSET 4
12235 +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
12236 + DPSECI_CMD_BASE_VERSION)
12238 +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
12239 + DPSECI_CMD_BASE_VERSION_V2)
12241 +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
12242 + DPSECI_CMD_BASE_VERSION_V3)
12245 +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
12246 +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
12247 +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
12248 +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
12249 +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
12251 +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
12252 +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
12253 +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
12254 +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
12255 +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
12257 +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
12258 +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
12259 +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
12260 +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
12261 +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
12262 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
12264 +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
12265 +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
12266 +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
12267 +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
12268 +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
12269 +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
12270 +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
12271 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
12272 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
12274 +/* Macros for accessing command fields smaller than 1 byte */
12275 +#define DPSECI_MASK(field) \
12276 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
12277 + DPSECI_##field##_SHIFT)
12279 +#define dpseci_set_field(var, field, val) \
12280 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
12282 +#define dpseci_get_field(var, field) \
12283 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
12285 +struct dpseci_cmd_open {
12286 + __le32 dpseci_id;
12289 +struct dpseci_cmd_create {
12290 + u8 priorities[8];
12291 + u8 num_tx_queues;
12292 + u8 num_rx_queues;
12296 + u8 priorities2[8];
12299 +struct dpseci_cmd_destroy {
12300 + __le32 object_id;
12303 +#define DPSECI_ENABLE_SHIFT 0
12304 +#define DPSECI_ENABLE_SIZE 1
12306 +struct dpseci_rsp_is_enabled {
12310 +struct dpseci_cmd_irq_enable {
12316 +struct dpseci_rsp_get_irq_enable {
12320 +struct dpseci_cmd_irq_mask {
12325 +struct dpseci_cmd_irq_status {
12330 +struct dpseci_rsp_get_attributes {
12333 + u8 num_tx_queues;
12334 + u8 num_rx_queues;
12339 +#define DPSECI_DEST_TYPE_SHIFT 0
12340 +#define DPSECI_DEST_TYPE_SIZE 4
12342 +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
12343 +#define DPSECI_ORDER_PRESERVATION_SIZE 1
12345 +struct dpseci_cmd_queue {
12356 + u8 order_preservation_en;
12359 +struct dpseci_rsp_get_tx_queue {
12365 +struct dpseci_rsp_get_sec_attr {
12372 + u8 zuc_auth_acc_num;
12373 + u8 zuc_enc_acc_num;
12375 + u8 snow_f8_acc_num;
12376 + u8 snow_f9_acc_num;
12380 + u8 kasumi_acc_num;
12391 +struct dpseci_rsp_get_sec_counters {
12392 + __le64 dequeued_requests;
12393 + __le64 ob_enc_requests;
12394 + __le64 ib_dec_requests;
12395 + __le64 ob_enc_bytes;
12396 + __le64 ob_prot_bytes;
12397 + __le64 ib_dec_bytes;
12398 + __le64 ib_valid_bytes;
12401 +struct dpseci_rsp_get_api_version {
12406 +struct dpseci_cmd_opr {
12418 +#define DPSECI_OPR_RIP_SHIFT 0
12419 +#define DPSECI_OPR_RIP_SIZE 1
12420 +#define DPSECI_OPR_ENABLE_SHIFT 1
12421 +#define DPSECI_OPR_ENABLE_SIZE 1
12422 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
12423 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
12424 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
12425 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
12427 +struct dpseci_rsp_get_opr {
12455 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
12456 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
12457 +#define DPSECI_CGN_UNITS_SHIFT 4
12458 +#define DPSECI_CGN_UNITS_SIZE 2
12460 +struct dpseci_cmd_congestion_notification {
12462 + __le16 notification_mode;
12465 + __le64 message_iova;
12466 + __le64 message_ctx;
12467 + __le32 threshold_entry;
12468 + __le32 threshold_exit;
12471 +#endif /* _DPSECI_CMD_H_ */
12472 --- a/drivers/crypto/caam/error.c
12473 +++ b/drivers/crypto/caam/error.c
12474 @@ -108,6 +108,54 @@ static const struct {
12475 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
12478 +static const struct {
12480 + const char *error_text;
12481 +} qi_error_list[] = {
12482 + { 0x1F, "Job terminated by FQ or ICID flush" },
12483 + { 0x20, "FD format error"},
12484 + { 0x21, "FD command format error"},
12485 + { 0x23, "FL format error"},
12486 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
12487 + { 0x30, "Max. buffer size too small"},
12488 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
12489 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
12490 + { 0x33, "Size over/underflow (allocate mode)"},
12491 + { 0x34, "Size over/underflow (reuse mode)"},
12492 + { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
12493 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
12494 + { 0x41, "SBC frame format not supported (allocate mode)"},
12495 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
12496 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
12497 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
12498 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
12499 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
12500 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
12501 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
12502 + { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
12503 + { 0x51, "Unsupported IF reuse mode"},
12504 + { 0x52, "Unsupported FL use mode"},
12505 + { 0x53, "Unsupported RJD use mode"},
12506 + { 0x54, "Unsupported inline descriptor use mode"},
12507 + { 0xC0, "Table buffer pool 0 depletion"},
12508 + { 0xC1, "Table buffer pool 1 depletion"},
12509 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
12510 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
12511 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
12512 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
12513 + { 0xD0, "FLC read error"},
12514 + { 0xD1, "FL read error"},
12515 + { 0xD2, "FL write error"},
12516 + { 0xD3, "OF SGT write error"},
12517 + { 0xD4, "PTA read error"},
12518 + { 0xD5, "PTA write error"},
12519 + { 0xD6, "OF SGT F-bit write error"},
12520 + { 0xD7, "ASA write error"},
12521 + { 0xE1, "FLC[ICR]=0 ICID error"},
12522 + { 0xE2, "FLC[ICR]=1 ICID error"},
12523 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
12526 static const char * const cha_id_list[] = {
12529 @@ -236,6 +284,27 @@ static void report_deco_status(struct de
12530 status, error, idx_str, idx, err_str, err_err_code);
12533 +static void report_qi_status(struct device *qidev, const u32 status,
12534 + const char *error)
12536 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
12537 + const char *err_str = "unidentified error value 0x";
12538 + char err_err_code[3] = { 0 };
12541 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
12542 + if (qi_error_list[i].value == err_id)
12545 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
12546 + err_str = qi_error_list[i].error_text;
12548 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
12550 + dev_err(qidev, "%08x: %s: %s%s\n",
12551 + status, error, err_str, err_err_code);
12554 static void report_jr_status(struct device *jrdev, const u32 status,
12557 @@ -250,7 +319,7 @@ static void report_cond_code_status(stru
12558 status, error, __func__);
12561 -void caam_jr_strstatus(struct device *jrdev, u32 status)
12562 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
12564 static const struct stat_src {
12565 void (*report_ssed)(struct device *jrdev, const u32 status,
12566 @@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr
12567 { report_ccb_status, "CCB" },
12568 { report_jump_status, "Jump" },
12569 { report_deco_status, "DECO" },
12570 - { NULL, "Queue Manager Interface" },
12571 + { report_qi_status, "Queue Manager Interface" },
12572 { report_jr_status, "Job Ring" },
12573 { report_cond_code_status, "Condition Code" },
12575 @@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr
12577 dev_err(jrdev, "%d: unknown error source\n", ssrc);
12579 -EXPORT_SYMBOL(caam_jr_strstatus);
12580 +EXPORT_SYMBOL(caam_strstatus);
12581 --- a/drivers/crypto/caam/error.h
12582 +++ b/drivers/crypto/caam/error.h
12584 #ifndef CAAM_ERROR_H
12585 #define CAAM_ERROR_H
12586 #define CAAM_ERROR_STR_MAX 302
12587 -void caam_jr_strstatus(struct device *jrdev, u32 status);
12589 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
12591 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
12592 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
12594 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
12595 int rowsize, int groupsize, struct scatterlist *sg,
12596 --- a/drivers/crypto/caam/intern.h
12597 +++ b/drivers/crypto/caam/intern.h
12598 @@ -84,6 +84,7 @@ struct caam_drv_private {
12599 u8 qi_present; /* Nonzero if QI present in device */
12600 int secvio_irq; /* Security violation interrupt number */
12601 int virt_en; /* Virtualization enabled in CAAM */
12602 + int era; /* CAAM Era (internal HW revision) */
12604 #define RNG4_MAX_HANDLES 2
12606 --- a/drivers/crypto/caam/jr.c
12607 +++ b/drivers/crypto/caam/jr.c
12608 @@ -23,6 +23,14 @@ struct jr_driver_data {
12610 static struct jr_driver_data driver_data;
12612 +static int jr_driver_probed;
12614 +int caam_jr_driver_probed(void)
12616 + return jr_driver_probed;
12618 +EXPORT_SYMBOL(caam_jr_driver_probed);
12620 static int caam_reset_hw_jr(struct device *dev)
12622 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
12623 @@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor
12624 dev_err(jrdev, "Failed to shut down job ring\n");
12625 irq_dispose_mapping(jrpriv->irq);
12627 + jr_driver_probed--;
12632 @@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void)
12633 EXPORT_SYMBOL(caam_jr_alloc);
12636 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
12638 + * returns : pointer to the newly allocated physical
12639 + * JobR dev can be written to if successful.
12641 +struct device *caam_jridx_alloc(int idx)
12643 + struct caam_drv_private_jr *jrpriv;
12644 + struct device *dev = ERR_PTR(-ENODEV);
12646 + spin_lock(&driver_data.jr_alloc_lock);
12648 + if (list_empty(&driver_data.jr_list))
12651 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
12652 + if (jrpriv->ridx == idx) {
12653 + atomic_inc(&jrpriv->tfm_count);
12654 + dev = jrpriv->dev;
12660 + spin_unlock(&driver_data.jr_alloc_lock);
12663 +EXPORT_SYMBOL(caam_jridx_alloc);
12666 * caam_jr_free() - Free the Job Ring
12667 * @rdev - points to the dev that identifies the Job ring to
12669 @@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform
12671 atomic_set(&jrpriv->tfm_count, 0);
12673 + jr_driver_probed++;
12678 --- a/drivers/crypto/caam/jr.h
12679 +++ b/drivers/crypto/caam/jr.h
12683 /* Prototypes for backend-level services exposed to APIs */
12684 +int caam_jr_driver_probed(void);
12685 struct device *caam_jr_alloc(void);
12686 +struct device *caam_jridx_alloc(int idx);
12687 void caam_jr_free(struct device *rdev);
12688 int caam_jr_enqueue(struct device *dev, u32 *desc,
12689 void (*cbk)(struct device *dev, u32 *desc, u32 status,
12690 --- a/drivers/crypto/caam/key_gen.c
12691 +++ b/drivers/crypto/caam/key_gen.c
12693 #include "desc_constr.h"
12694 #include "key_gen.h"
12697 - * split_key_len - Compute MDHA split key length for a given algorithm
12698 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
12699 - * SHA224, SHA384, SHA512.
12701 - * Return: MDHA split key length
12703 -static inline u32 split_key_len(u32 hash)
12705 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
12706 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
12709 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
12711 - return (u32)(mdpadlen[idx] * 2);
12715 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
12716 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
12717 - * SHA224, SHA384, SHA512.
12719 - * Return: MDHA split key pad length
12721 -static inline u32 split_key_pad_len(u32 hash)
12723 - return ALIGN(split_key_len(hash), 16);
12726 void split_key_done(struct device *dev, u32 *desc, u32 err,
12729 --- a/drivers/crypto/caam/key_gen.h
12730 +++ b/drivers/crypto/caam/key_gen.h
12736 + * split_key_len - Compute MDHA split key length for a given algorithm
12737 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
12738 + * SHA224, SHA384, SHA512.
12740 + * Return: MDHA split key length
12742 +static inline u32 split_key_len(u32 hash)
12744 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
12745 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
12748 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
12750 + return (u32)(mdpadlen[idx] * 2);
12754 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
12755 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
12756 + * SHA224, SHA384, SHA512.
12758 + * Return: MDHA split key pad length
12760 +static inline u32 split_key_pad_len(u32 hash)
12762 + return ALIGN(split_key_len(hash), 16);
12765 struct split_key_result {
12766 struct completion completion;
12768 --- a/drivers/crypto/caam/qi.c
12769 +++ b/drivers/crypto/caam/qi.c
12772 #include <linux/cpumask.h>
12773 #include <linux/kthread.h>
12774 -#include <soc/fsl/qman.h>
12775 +#include <linux/fsl_qman.h>
12779 @@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache;
12780 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
12785 int num_retries = 0;
12787 - qm_fd_clear_fd(&fd);
12788 - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
12790 - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
12792 + fd.format = qm_fd_compound;
12793 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
12794 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
12795 DMA_BIDIRECTIONAL);
12796 - if (dma_mapping_error(qidev, addr)) {
12797 + if (dma_mapping_error(qidev, fd.addr)) {
12798 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
12801 - qm_fd_addr_set64(&fd, addr);
12804 - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
12805 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
12809 @@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev
12810 EXPORT_SYMBOL(caam_qi_enqueue);
12812 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
12813 - const union qm_mr_entry *msg)
12814 + const struct qm_mr_entry *msg)
12816 const struct qm_fd *fd;
12817 struct caam_drv_req *drv_req;
12818 @@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p
12822 - if (qm_fd_get_format(fd) != qm_fd_compound) {
12823 + if (fd->format != qm_fd_compound) {
12824 dev_err(qidev, "Non-compound FD from CAAM\n");
12827 @@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f
12828 req_fq->cb.fqs = NULL;
12830 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
12831 - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
12832 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
12835 dev_err(qidev, "Failed to create session req FQ\n");
12836 goto create_req_fq_fail;
12839 - memset(&opts, 0, sizeof(opts));
12840 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
12841 - QM_INITFQ_WE_CONTEXTB |
12842 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
12843 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
12844 - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
12845 - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
12846 - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
12847 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
12848 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
12849 + QM_INITFQ_WE_CGID;
12850 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
12851 + opts.fqd.dest.channel = qm_channel_caam;
12852 + opts.fqd.dest.wq = 2;
12853 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
12854 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
12855 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
12856 opts.fqd.cgid = qipriv.cgr.cgrid;
12858 ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
12859 @@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f
12863 - qman_destroy_fq(req_fq);
12864 + qman_destroy_fq(req_fq, 0);
12865 create_req_fq_fail:
12867 return ERR_PTR(ret);
12868 @@ -275,7 +275,7 @@ empty_fq:
12870 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
12872 - qman_destroy_fq(fq);
12873 + qman_destroy_fq(fq, 0);
12877 @@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq
12881 - if (!qm_mcr_np_get(&np, frm_cnt))
12886 @@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp
12887 struct caam_drv_req *drv_req;
12888 const struct qm_fd *fd;
12889 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
12892 if (caam_qi_napi_schedule(p, caam_napi))
12893 return qman_cb_dqrr_stop;
12896 - status = be32_to_cpu(fd->status);
12897 - if (unlikely(status))
12898 - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
12899 + if (unlikely(fd->status)) {
12900 + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
12901 + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
12903 + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
12904 + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
12905 + dev_err(qidev, "Error: %#x in CAAM response FD\n",
12909 - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
12910 + if (unlikely(fd->format != qm_fd_compound)) {
12911 dev_err(qidev, "Non-compound FD from CAAM\n");
12912 return qman_cb_dqrr_consume;
12915 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
12916 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
12917 if (unlikely(!drv_req)) {
12919 "Can't find original request for caam response\n");
12920 @@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp
12921 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
12922 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
12924 - drv_req->cbk(drv_req, status);
12925 + drv_req->cbk(drv_req, fd->status);
12926 return qman_cb_dqrr_consume;
12929 @@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic
12933 - memset(&opts, 0, sizeof(opts));
12934 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
12935 - QM_INITFQ_WE_CONTEXTB |
12936 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
12937 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
12938 - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
12939 - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
12940 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
12941 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
12942 + QM_INITFQ_WE_CGID;
12943 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
12945 + opts.fqd.dest.channel = qman_affine_channel(cpu);
12946 + opts.fqd.dest.wq = 3;
12947 opts.fqd.cgid = qipriv.cgr.cgrid;
12948 opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
12949 QM_STASHING_EXCL_DATA;
12950 - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
12951 + opts.fqd.context_a.stashing.data_cl = 1;
12952 + opts.fqd.context_a.stashing.context_cl = 1;
12954 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
12956 @@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev
12958 qipriv.cgr.cb = cgr_cb;
12959 memset(&opts, 0, sizeof(opts));
12960 - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
12962 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
12963 opts.cgr.cscn_en = QM_CGR_EN;
12964 opts.cgr.mode = QMAN_CGR_MODE_FRAME;
12965 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
12966 --- a/drivers/crypto/caam/qi.h
12967 +++ b/drivers/crypto/caam/qi.h
12972 -#include <soc/fsl/qman.h>
12973 +#include <linux/fsl_qman.h>
12974 #include "compat.h"
12976 #include "desc_constr.h"
12977 --- a/drivers/crypto/caam/regs.h
12978 +++ b/drivers/crypto/caam/regs.h
12979 @@ -627,6 +627,8 @@ struct caam_job_ring {
12980 #define JRSTA_DECOERR_INVSIGN 0x86
12981 #define JRSTA_DECOERR_DSASIGN 0x87
12983 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
12985 #define JRSTA_CCBERR_JUMP 0x08000000
12986 #define JRSTA_CCBERR_INDEX_MASK 0xff00
12987 #define JRSTA_CCBERR_INDEX_SHIFT 8
12988 --- a/drivers/crypto/caam/sg_sw_qm.h
12989 +++ b/drivers/crypto/caam/sg_sw_qm.h
12990 @@ -34,46 +34,61 @@
12991 #ifndef __SG_SW_QM_H
12992 #define __SG_SW_QM_H
12994 -#include <soc/fsl/qman.h>
12995 +#include <linux/fsl_qman.h>
12998 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
13000 + dma_addr_t addr = qm_sg_ptr->opaque;
13002 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
13003 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
13006 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
13008 + u32 len, u16 offset)
13010 - qm_sg_entry_set64(qm_sg_ptr, dma);
13011 + qm_sg_ptr->addr = dma;
13012 + qm_sg_ptr->length = len;
13013 qm_sg_ptr->__reserved2 = 0;
13014 qm_sg_ptr->bpid = 0;
13015 - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
13016 + qm_sg_ptr->__reserved3 = 0;
13017 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
13019 + cpu_to_hw_sg(qm_sg_ptr);
13022 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
13023 dma_addr_t dma, u32 len, u16 offset)
13025 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
13026 - qm_sg_entry_set_len(qm_sg_ptr, len);
13027 + qm_sg_ptr->extension = 0;
13028 + qm_sg_ptr->final = 0;
13029 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
13032 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
13033 dma_addr_t dma, u32 len, u16 offset)
13035 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
13036 - qm_sg_entry_set_f(qm_sg_ptr, len);
13037 + qm_sg_ptr->extension = 0;
13038 + qm_sg_ptr->final = 1;
13039 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
13042 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
13043 dma_addr_t dma, u32 len, u16 offset)
13045 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
13046 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
13047 + qm_sg_ptr->extension = 1;
13048 + qm_sg_ptr->final = 0;
13049 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
13052 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
13053 dma_addr_t dma, u32 len,
13056 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
13057 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
13058 - (len & QM_SG_LEN_MASK));
13059 + qm_sg_ptr->extension = 1;
13060 + qm_sg_ptr->final = 1;
13061 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
13065 @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
13066 struct qm_sg_entry *qm_sg_ptr, u16 offset)
13068 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
13069 - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
13071 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
13072 + qm_sg_ptr->final = 1;
13073 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
13076 #endif /* __SG_SW_QM_H */
13077 --- a/drivers/crypto/talitos.c
13078 +++ b/drivers/crypto/talitos.c
13079 @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
13080 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
13081 sg_count, areq->assoclen, tbl_off, elen);
13084 + * In case of SEC 2.x+, cipher in len must include only the ciphertext,
13085 + * while extent is used for ICV len.
13087 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
13088 + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
13089 + desc->ptr[4].len = cpu_to_be16(cryptlen);
13093 sync_needed = true;