kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
1 From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 23 Apr 2019 17:41:43 +0800
4 Subject: [PATCH] sec: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch adding SEC (crypto) support for layerscape
10
11 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
12 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
15 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
19 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
20 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
24 ---
25  crypto/Kconfig                      |   20 +
26  crypto/Makefile                     |    1 +
27  crypto/chacha20poly1305.c           |    2 -
28  crypto/tcrypt.c                     |   27 +-
29  crypto/testmgr.c                    |  244 ++
30  crypto/testmgr.h                    |  219 ++
31  crypto/tls.c                        |  607 ++++
32  drivers/crypto/Makefile             |    2 +-
33  drivers/crypto/caam/Kconfig         |   85 +-
34  drivers/crypto/caam/Makefile        |   26 +-
35  drivers/crypto/caam/caamalg.c       |  468 +++-
36  drivers/crypto/caam/caamalg_desc.c  |  903 +++++-
37  drivers/crypto/caam/caamalg_desc.h  |   52 +-
38  drivers/crypto/caam/caamalg_qi.c    | 1060 ++++++-
39  drivers/crypto/caam/caamalg_qi2.c   | 5843 +++++++++++++++++++++++++++++++++++
40  drivers/crypto/caam/caamalg_qi2.h   |  276 ++
41  drivers/crypto/caam/caamhash.c      |  192 +-
42  drivers/crypto/caam/caamhash_desc.c |  108 +
43  drivers/crypto/caam/caamhash_desc.h |   49 +
44  drivers/crypto/caam/caampkc.c       |   52 +-
45  drivers/crypto/caam/caamrng.c       |   52 +-
46  drivers/crypto/caam/compat.h        |    4 +
47  drivers/crypto/caam/ctrl.c          |  194 +-
48  drivers/crypto/caam/desc.h          |   89 +-
49  drivers/crypto/caam/desc_constr.h   |   59 +-
50  drivers/crypto/caam/dpseci.c        |  865 ++++++
51  drivers/crypto/caam/dpseci.h        |  433 +++
52  drivers/crypto/caam/dpseci_cmd.h    |  287 ++
53  drivers/crypto/caam/error.c         |   81 +-
54  drivers/crypto/caam/error.h         |    6 +-
55  drivers/crypto/caam/intern.h        |  102 +-
56  drivers/crypto/caam/jr.c            |   84 +
57  drivers/crypto/caam/jr.h            |    2 +
58  drivers/crypto/caam/key_gen.c       |   30 -
59  drivers/crypto/caam/key_gen.h       |   30 +
60  drivers/crypto/caam/qi.c            |  134 +-
61  drivers/crypto/caam/qi.h            |    2 +-
62  drivers/crypto/caam/regs.h          |   76 +-
63  drivers/crypto/caam/sg_sw_qm.h      |   46 +-
64  drivers/crypto/talitos.c            |    8 +
65  include/crypto/chacha20.h           |    1 +
66  41 files changed, 12088 insertions(+), 733 deletions(-)
67  create mode 100644 crypto/tls.c
68  create mode 100644 drivers/crypto/caam/caamalg_qi2.c
69  create mode 100644 drivers/crypto/caam/caamalg_qi2.h
70  create mode 100644 drivers/crypto/caam/caamhash_desc.c
71  create mode 100644 drivers/crypto/caam/caamhash_desc.h
72  create mode 100644 drivers/crypto/caam/dpseci.c
73  create mode 100644 drivers/crypto/caam/dpseci.h
74  create mode 100644 drivers/crypto/caam/dpseci_cmd.h
75
76 --- a/crypto/Kconfig
77 +++ b/crypto/Kconfig
78 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
79           a sequence number xored with a salt.  This is the default
80           algorithm for CBC.
81  
82 +config CRYPTO_TLS
83 +       tristate "TLS support"
84 +       select CRYPTO_AEAD
85 +       select CRYPTO_BLKCIPHER
86 +       select CRYPTO_MANAGER
87 +       select CRYPTO_HASH
88 +       select CRYPTO_NULL
89 +       select CRYPTO_AUTHENC
90 +       help
91 +         Support for TLS 1.0 record encryption and decryption
92 +
93 +         This module adds support for encryption/decryption of TLS 1.0 frames
94 +         using block cipher algorithms. The name of the resulting algorithm is
95 +         "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
96 +         algorithms are used (e.g. aes-generic, sha1-generic), but hardware
97 +         accelerated versions will be used automatically if available.
98 +
99 +         User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
100 +         operations through AF_ALG or cryptodev interfaces.
101 +
102  comment "Block modes"
103  
104  config CRYPTO_CBC
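
[Editor's sketch] On the user-space offload path mentioned in the help text above: below is a minimal sketch of driving the tls10 template through AF_ALG. It assumes a kernel built with CRYPTO_TLS and CRYPTO_USER_API_AEAD, uses the authenc-format key blob shown by the test vectors later in this patch, and omits all error handling.

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",  /* tls10 registers as an AEAD */
                    .salg_name   = "tls10(hmac(sha1),cbc(aes))",
            };
            /* 8-byte rtattr header + 20-byte HMAC-SHA1 key + 16-byte AES
             * key, in the format crypto_authenc_extractkeys() expects
             * (see the tls_enc_tv_template vectors below) */
            unsigned char key[8 + 20 + 16] = { 0 /* ... */ };
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
            setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20);
            opfd = accept(tfmfd, NULL, 0);
            /* records then go through sendmsg() with ALG_SET_OP, ALG_SET_IV
             * and ALG_SET_AEAD_ASSOCLEN control messages and are read back
             * with read()/recvmsg() */
            close(opfd);
            close(tfmfd);
            return 0;
    }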
105 --- a/crypto/Makefile
106 +++ b/crypto/Makefile
107 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
108  obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
109  obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
110  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
111 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
112  obj-$(CONFIG_CRYPTO_LZO) += lzo.o
113  obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
114  obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
115 --- a/crypto/chacha20poly1305.c
116 +++ b/crypto/chacha20poly1305.c
117 @@ -22,8 +22,6 @@
118  
119  #include "internal.h"
120  
121 -#define CHACHAPOLY_IV_SIZE     12
122 -
123  struct chachapoly_instance_ctx {
124         struct crypto_skcipher_spawn chacha;
125         struct crypto_ahash_spawn poly;
126 --- a/crypto/tcrypt.c
127 +++ b/crypto/tcrypt.c
128 @@ -76,7 +76,7 @@ static char *check[] = {
129         "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
130         "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
131         "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
132 -       NULL
133 +       "rsa", NULL
134  };
135  
136  struct tcrypt_result {
137 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
138                                                iv);
139                         aead_request_set_ad(req, aad_size);
140  
141 -                       if (secs)
142 +                       if (secs) {
143                                 ret = test_aead_jiffies(req, enc, *b_size,
144                                                         secs);
145 -                       else
146 +                               cond_resched();
147 +                       } else {
148                                 ret = test_aead_cycles(req, enc, *b_size);
149 +                       }
150  
151                         if (ret) {
152                                 pr_err("%s() failed return code=%d\n", e, ret);
153 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
154  
155                 ahash_request_set_crypt(req, sg, output, speed[i].plen);
156  
157 -               if (secs)
158 +               if (secs) {
159                         ret = test_ahash_jiffies(req, speed[i].blen,
160                                                  speed[i].plen, output, secs);
161 -               else
162 +                       cond_resched();
163 +               } else {
164                         ret = test_ahash_cycles(req, speed[i].blen,
165                                                 speed[i].plen, output);
166 +               }
167  
168                 if (ret) {
169                         pr_err("hashing failed ret=%d\n", ret);
170 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
171  
172                         skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
173  
174 -                       if (secs)
175 +                       if (secs) {
176                                 ret = test_acipher_jiffies(req, enc,
177                                                            *b_size, secs);
178 -                       else
179 +                               cond_resched();
180 +                       } else {
181                                 ret = test_acipher_cycles(req, enc,
182                                                           *b_size);
183 +                       }
184  
185                         if (ret) {
186                                 pr_err("%s() failed flags=%x\n", e,
187 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
188                 ret += tcrypt_test("hmac(sha3-512)");
189                 break;
190  
191 +       case 115:
192 +               ret += tcrypt_test("rsa");
193 +               break;
194 +
195         case 150:
196                 ret += tcrypt_test("ansi_cprng");
197                 break;
198 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
199         case 190:
200                 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
201                 break;
202 +       case 191:
203 +               ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
204 +               break;
205         case 200:
206                 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
207                                 speed_template_16_24_32);
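
[Editor's note] The two tcrypt cases added above are run by loading the module with the matching mode number, e.g. "modprobe tcrypt mode=115" for the rsa self-test and "modprobe tcrypt mode=191" for tls10(hmac(sha1),cbc(aes)); tcrypt deliberately fails to stay loaded after running, so the results land in the kernel log.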
208 --- a/crypto/testmgr.c
209 +++ b/crypto/testmgr.c
210 @@ -117,6 +117,13 @@ struct drbg_test_suite {
211         unsigned int count;
212  };
213  
214 +struct tls_test_suite {
215 +       struct {
216 +               struct tls_testvec *vecs;
217 +               unsigned int count;
218 +       } enc, dec;
219 +};
220 +
221  struct akcipher_test_suite {
222         const struct akcipher_testvec *vecs;
223         unsigned int count;
224 @@ -140,6 +147,7 @@ struct alg_test_desc {
225                 struct hash_test_suite hash;
226                 struct cprng_test_suite cprng;
227                 struct drbg_test_suite drbg;
228 +               struct tls_test_suite tls;
229                 struct akcipher_test_suite akcipher;
230                 struct kpp_test_suite kpp;
231         } suite;
232 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
233         return 0;
234  }
235  
236 +static int __test_tls(struct crypto_aead *tfm, int enc,
237 +                     struct tls_testvec *template, unsigned int tcount,
238 +                     const bool diff_dst)
239 +{
240 +       const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
241 +       unsigned int i, k, authsize;
242 +       char *q;
243 +       struct aead_request *req;
244 +       struct scatterlist *sg;
245 +       struct scatterlist *sgout;
246 +       const char *e, *d;
247 +       struct tcrypt_result result;
248 +       void *input;
249 +       void *output;
250 +       void *assoc;
251 +       char *iv;
252 +       char *key;
253 +       char *xbuf[XBUFSIZE];
254 +       char *xoutbuf[XBUFSIZE];
255 +       char *axbuf[XBUFSIZE];
256 +       int ret = -ENOMEM;
257 +
258 +       if (testmgr_alloc_buf(xbuf))
259 +               goto out_noxbuf;
260 +
261 +       if (diff_dst && testmgr_alloc_buf(xoutbuf))
262 +               goto out_nooutbuf;
263 +
264 +       if (testmgr_alloc_buf(axbuf))
265 +               goto out_noaxbuf;
266 +
267 +       iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
268 +       if (!iv)
269 +               goto out_noiv;
270 +
271 +       key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
272 +       if (!key)
273 +               goto out_nokey;
274 +
275 +       sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
276 +       if (!sg)
277 +               goto out_nosg;
278 +
279 +       sgout = sg + 8;
280 +
281 +       d = diff_dst ? "-ddst" : "";
282 +       e = enc ? "encryption" : "decryption";
283 +
284 +       init_completion(&result.completion);
285 +
286 +       req = aead_request_alloc(tfm, GFP_KERNEL);
287 +       if (!req) {
288 +               pr_err("alg: tls%s: Failed to allocate request for %s\n",
289 +                      d, algo);
290 +               goto out;
291 +       }
292 +
293 +       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
294 +                                 tcrypt_complete, &result);
295 +
296 +       for (i = 0; i < tcount; i++) {
297 +               input = xbuf[0];
298 +               assoc = axbuf[0];
299 +
300 +               ret = -EINVAL;
301 +               if (WARN_ON(template[i].ilen > PAGE_SIZE ||
302 +                           template[i].alen > PAGE_SIZE))
303 +                       goto out;
304 +
305 +               memcpy(assoc, template[i].assoc, template[i].alen);
306 +               memcpy(input, template[i].input, template[i].ilen);
307 +
308 +               if (template[i].iv)
309 +                       memcpy(iv, template[i].iv, MAX_IVLEN);
310 +               else
311 +                       memset(iv, 0, MAX_IVLEN);
312 +
313 +               crypto_aead_clear_flags(tfm, ~0);
314 +
315 +               if (template[i].klen > MAX_KEYLEN) {
316 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: key size %d > %d\n",
317 +                              d, i, algo, template[i].klen, MAX_KEYLEN);
318 +                       ret = -EINVAL;
319 +                       goto out;
320 +               }
321 +               memcpy(key, template[i].key, template[i].klen);
322 +
323 +               ret = crypto_aead_setkey(tfm, key, template[i].klen);
324 +               if (!ret == template[i].fail) {
325 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
326 +                              d, i, algo, crypto_aead_get_flags(tfm));
327 +                       goto out;
328 +               } else if (ret)
329 +                       continue;
330 +
331 +               authsize = 20;
332 +               ret = crypto_aead_setauthsize(tfm, authsize);
333 +               if (ret) {
334 +                       pr_err("alg: tls%s: Failed to set authsize to %u on test %d for %s\n",
335 +                              d, authsize, i, algo);
336 +                       goto out;
337 +               }
338 +
339 +               k = !!template[i].alen;
340 +               sg_init_table(sg, k + 1);
341 +               sg_set_buf(&sg[0], assoc, template[i].alen);
342 +               sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
343 +                                          template[i].ilen));
344 +               output = input;
345 +
346 +               if (diff_dst) {
347 +                       sg_init_table(sgout, k + 1);
348 +                       sg_set_buf(&sgout[0], assoc, template[i].alen);
349 +
350 +                       output = xoutbuf[0];
351 +                       sg_set_buf(&sgout[k], output,
352 +                                  (enc ? template[i].rlen : template[i].ilen));
353 +               }
354 +
355 +               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
356 +                                      template[i].ilen, iv);
357 +
358 +               aead_request_set_ad(req, template[i].alen);
359 +
360 +               ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
361 +
362 +               switch (ret) {
363 +               case 0:
364 +                       if (template[i].novrfy) {
365 +                               /* verification was supposed to fail */
366 +                               pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
367 +                                      d, e, i, algo);
368 +                               /* so really, we got a bad message */
369 +                               ret = -EBADMSG;
370 +                               goto out;
371 +                       }
372 +                       break;
373 +               case -EINPROGRESS:
374 +               case -EBUSY:
375 +                       wait_for_completion(&result.completion);
376 +                       reinit_completion(&result.completion);
377 +                       ret = result.err;
378 +                       if (!ret)
379 +                               break;
380 +               case -EBADMSG:
381 +                       /* verification failure was expected */
382 +                       if (template[i].novrfy)
383 +                               continue;
384 +                       /* fall through */
385 +               default:
386 +                       pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
387 +                              d, e, i, algo, -ret);
388 +                       goto out;
389 +               }
390 +
391 +               q = output;
392 +               if (memcmp(q, template[i].result, template[i].rlen)) {
393 +                       pr_err("alg: tls%s: Test %d failed on %s for %s\n",
394 +                              d, i, e, algo);
395 +                       hexdump(q, template[i].rlen);
396 +                       pr_err("should be:\n");
397 +                       hexdump(template[i].result, template[i].rlen);
398 +                       ret = -EINVAL;
399 +                       goto out;
400 +               }
401 +       }
402 +
403 +out:
404 +       aead_request_free(req);
405 +
406 +       kfree(sg);
407 +out_nosg:
408 +       kfree(key);
409 +out_nokey:
410 +       kfree(iv);
411 +out_noiv:
412 +       testmgr_free_buf(axbuf);
413 +out_noaxbuf:
414 +       if (diff_dst)
415 +               testmgr_free_buf(xoutbuf);
416 +out_nooutbuf:
417 +       testmgr_free_buf(xbuf);
418 +out_noxbuf:
419 +       return ret;
420 +}
421 +
422 +static int test_tls(struct crypto_aead *tfm, int enc,
423 +                   struct tls_testvec *template, unsigned int tcount)
424 +{
425 +       int ret;
426 +       /* test 'dst == src' case */
427 +       ret = __test_tls(tfm, enc, template, tcount, false);
428 +       if (ret)
429 +               return ret;
430 +       /* test 'dst != src' case */
431 +       return __test_tls(tfm, enc, template, tcount, true);
432 +}
433 +
434 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
435 +                       u32 type, u32 mask)
436 +{
437 +       struct crypto_aead *tfm;
438 +       int err = 0;
439 +
440 +       tfm = crypto_alloc_aead(driver, type, mask);
441 +       if (IS_ERR(tfm)) {
442 +               pr_err("alg: tls: Failed to load transform for %s: %ld\n",
443 +                      driver, PTR_ERR(tfm));
444 +               return PTR_ERR(tfm);
445 +       }
446 +
447 +       if (desc->suite.tls.enc.vecs) {
448 +               err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
449 +                              desc->suite.tls.enc.count);
450 +               if (err)
451 +                       goto out;
452 +       }
453 +
454 +       if (!err && desc->suite.tls.dec.vecs)
455 +               err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
456 +                              desc->suite.tls.dec.count);
457 +
458 +out:
459 +       crypto_free_aead(tfm);
460 +       return err;
461 +}
462 +
463  static int test_cipher(struct crypto_cipher *tfm, int enc,
464                        const struct cipher_testvec *template,
465                        unsigned int tcount)
466 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
467                         .hash = __VECS(tgr192_tv_template)
468                 }
469         }, {
470 +               .alg = "tls10(hmac(sha1),cbc(aes))",
471 +               .test = alg_test_tls,
472 +               .suite = {
473 +                       .tls = {
474 +                               .enc = __VECS(tls_enc_tv_template),
475 +                               .dec = __VECS(tls_dec_tv_template)
476 +                       }
477 +               }
478 +       }, {
479                 .alg = "vmac(aes)",
480                 .test = alg_test_hash,
481                 .suite = {
482 --- a/crypto/testmgr.h
483 +++ b/crypto/testmgr.h
484 @@ -125,6 +125,20 @@ struct drbg_testvec {
485         size_t expectedlen;
486  };
487  
488 +struct tls_testvec {
489 +       char *key;      /* wrapped keys for encryption and authentication */
490 +       char *iv;       /* initialization vector */
491 +       char *input;    /* input data */
492 +       char *assoc;    /* associated data: seq num, type, version, input len */
493 +       char *result;   /* result data */
494 +       unsigned char fail;     /* set if setkey failure is expected */
495 +       unsigned char novrfy;   /* dec verification failure expected */
496 +       unsigned char klen;     /* key length */
497 +       unsigned short ilen;    /* input data length */
498 +       unsigned short alen;    /* associated data length */
499 +       unsigned short rlen;    /* result length */
500 +};
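
[Editor's sketch] The 13-byte assoc buffer used by the vectors below is the TLS 1.0 MAC pseudo-header named in the comment above. A sketch of its layout, inferred from the vectors (the struct and field names are illustrative, not part of this patch):

    /* layout of tls_testvec.assoc, alen = 13, big-endian fields */
    struct tls10_assoc {
            __be64 seq;      /* record sequence number */
            u8     type;     /* TLS record content type */
            __be16 version;  /* 0x0301 for TLS 1.0 */
            __be16 len;      /* fragment length covered by the MAC */
    } __packed;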
501 +
502  struct akcipher_testvec {
503         const unsigned char *key;
504         const unsigned char *m;
505 @@ -153,6 +167,211 @@ struct kpp_testvec {
506  static const char zeroed_string[48];
507  
508  /*
509 + * TLS1.0 synthetic test vectors
510 + */
511 +static struct tls_testvec tls_enc_tv_template[] = {
512 +       {
513 +#ifdef __LITTLE_ENDIAN
514 +               .key    = "\x08\x00"            /* rta length */
515 +                       "\x01\x00"              /* rta type */
516 +#else
517 +               .key    = "\x00\x08"            /* rta length */
518 +                       "\x00\x01"              /* rta type */
519 +#endif
520 +                       "\x00\x00\x00\x10"      /* enc key length */
521 +                       "authenticationkey20benckeyis16_bytes",
522 +               .klen   = 8 + 20 + 16,
523 +               .iv     = "iv0123456789abcd",
524 +               .input  = "Single block msg",
525 +               .ilen   = 16,
526 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
527 +                       "\x00\x03\x01\x00\x10",
528 +               .alen   = 13,
529 +               .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
530 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
531 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
532 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
533 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
534 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
535 +               .rlen   = 16 + 20 + 12,
536 +       }, {
537 +#ifdef __LITTLE_ENDIAN
538 +               .key    = "\x08\x00"            /* rta length */
539 +                       "\x01\x00"              /* rta type */
540 +#else
541 +               .key    = "\x00\x08"            /* rta length */
542 +                       "\x00\x01"              /* rta type */
543 +#endif
544 +                       "\x00\x00\x00\x10"      /* enc key length */
545 +                       "authenticationkey20benckeyis16_bytes",
546 +               .klen   = 8 + 20 + 16,
547 +               .iv     = "iv0123456789abcd",
548 +               .input  = "",
549 +               .ilen   = 0,
550 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
551 +                       "\x00\x03\x01\x00\x00",
552 +               .alen   = 13,
553 +               .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
554 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
555 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
556 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
557 +               .rlen   = 20 + 12,
558 +       }, {
559 +#ifdef __LITTLE_ENDIAN
560 +               .key    = "\x08\x00"            /* rta length */
561 +                       "\x01\x00"              /* rta type */
562 +#else
563 +               .key    = "\x00\x08"            /* rta length */
564 +                       "\x00\x01"              /* rta type */
565 +#endif
566 +                       "\x00\x00\x00\x10"      /* enc key length */
567 +                       "authenticationkey20benckeyis16_bytes",
568 +               .klen   = 8 + 20 + 16,
569 +               .iv     = "iv0123456789abcd",
570 +               .input  = "285 bytes plaintext285 bytes plaintext285 bytes"
571 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
572 +                       " bytes plaintext285 bytes plaintext285 bytes"
573 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
574 +                       " bytes plaintext285 bytes plaintext285 bytes"
575 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
576 +                       " bytes plaintext285 bytes plaintext",
577 +               .ilen   = 285,
578 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
579 +                       "\x00\x03\x01\x01\x1d",
580 +               .alen   = 13,
581 +               .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
582 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
583 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
584 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
585 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
586 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
587 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
588 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
589 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
590 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
591 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
592 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
593 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
594 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
595 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
596 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
597 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
598 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
599 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
600 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
601 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
602 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
603 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
604 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
605 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
606 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
607 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
608 +               .rlen   = 285 + 20 + 15,
609 +       }
610 +};
611 +
612 +static struct tls_testvec tls_dec_tv_template[] = {
613 +       {
614 +#ifdef __LITTLE_ENDIAN
615 +               .key    = "\x08\x00"            /* rta length */
616 +                       "\x01\x00"              /* rta type */
617 +#else
618 +               .key    = "\x00\x08"            /* rta length */
619 +                       "\x00\x01"              /* rta type */
620 +#endif
621 +                       "\x00\x00\x00\x10"      /* enc key length */
622 +                       "authenticationkey20benckeyis16_bytes",
623 +               .klen   = 8 + 20 + 16,
624 +               .iv     = "iv0123456789abcd",
625 +               .input  = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
626 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
627 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
628 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
629 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
630 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
631 +               .ilen   = 16 + 20 + 12,
632 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
633 +                       "\x00\x03\x01\x00\x30",
634 +               .alen   = 13,
635 +               .result = "Single block msg",
636 +               .rlen   = 16,
637 +       }, {
638 +#ifdef __LITTLE_ENDIAN
639 +               .key    = "\x08\x00"            /* rta length */
640 +                       "\x01\x00"              /* rta type */
641 +#else
642 +               .key    = "\x00\x08"            /* rta length */
643 +                       "\x00\x01"              /* rta type */
644 +#endif
645 +                       "\x00\x00\x00\x10"      /* enc key length */
646 +                       "authenticationkey20benckeyis16_bytes",
647 +               .klen   = 8 + 20 + 16,
648 +               .iv     = "iv0123456789abcd",
649 +               .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
650 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
651 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
652 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
653 +               .ilen   = 20 + 12,
654 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
655 +                       "\x00\x03\x01\x00\x20",
656 +               .alen   = 13,
657 +               .result = "",
658 +               .rlen   = 0,
659 +       }, {
660 +#ifdef __LITTLE_ENDIAN
661 +               .key    = "\x08\x00"            /* rta length */
662 +                       "\x01\x00"              /* rta type */
663 +#else
664 +               .key    = "\x00\x08"            /* rta length */
665 +                       "\x00\x01"              /* rta type */
666 +#endif
667 +                       "\x00\x00\x00\x10"      /* enc key length */
668 +                       "authenticationkey20benckeyis16_bytes",
669 +               .klen   = 8 + 20 + 16,
670 +               .iv     = "iv0123456789abcd",
671 +               .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
672 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
673 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
674 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
675 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
676 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
677 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
678 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
679 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
680 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
681 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
682 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
683 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
684 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
685 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
686 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
687 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
688 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
689 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
690 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
691 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
692 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
693 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
694 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
695 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
696 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
697 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
698 +
699 +               .ilen   = 285 + 20 + 15,
700 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
701 +                       "\x00\x03\x01\x01\x40",
702 +               .alen   = 13,
703 +               .result = "285 bytes plaintext285 bytes plaintext285 bytes"
704 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
705 +                       " bytes plaintext285 bytes plaintext285 bytes"
706 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
707 +                       " bytes plaintext285 bytes plaintext285 bytes"
708 +                       " plaintext285 bytes plaintext285 bytes plaintext",
709 +               .rlen   = 285,
710 +       }
711 +};
712 +
713 +/*
714   * RSA test vectors. Borrowed from openSSL.
715   */
716  static const struct akcipher_testvec rsa_tv_template[] = {
717 --- /dev/null
718 +++ b/crypto/tls.c
719 @@ -0,0 +1,607 @@
720 +/*
721 + * Copyright 2013 Freescale Semiconductor, Inc.
722 + * Copyright 2017 NXP Semiconductor, Inc.
723 + *
724 + * This program is free software; you can redistribute it and/or modify it
725 + * under the terms of the GNU General Public License as published by the Free
726 + * Software Foundation; either version 2 of the License, or (at your option)
727 + * any later version.
728 + *
729 + */
730 +
731 +#include <crypto/internal/aead.h>
732 +#include <crypto/internal/hash.h>
733 +#include <crypto/internal/skcipher.h>
734 +#include <crypto/authenc.h>
735 +#include <crypto/null.h>
736 +#include <crypto/scatterwalk.h>
737 +#include <linux/err.h>
738 +#include <linux/init.h>
739 +#include <linux/module.h>
740 +#include <linux/rtnetlink.h>
741 +
742 +struct tls_instance_ctx {
743 +       struct crypto_ahash_spawn auth;
744 +       struct crypto_skcipher_spawn enc;
745 +};
746 +
747 +struct crypto_tls_ctx {
748 +       unsigned int reqoff;
749 +       struct crypto_ahash *auth;
750 +       struct crypto_skcipher *enc;
751 +       struct crypto_skcipher *null;
752 +};
753 +
754 +struct tls_request_ctx {
755 +       /*
756 +        * cryptlen holds the payload length in the case of encryption or
757 +        * payload_len + icv_len + padding_len in case of decryption
758 +        */
759 +       unsigned int cryptlen;
760 +       /* working space for partial results */
761 +       struct scatterlist tmp[2];
762 +       struct scatterlist cipher[2];
763 +       struct scatterlist dst[2];
764 +       char tail[];
765 +};
766 +
767 +struct async_op {
768 +       struct completion completion;
769 +       int err;
770 +};
771 +
772 +static void tls_async_op_done(struct crypto_async_request *req, int err)
773 +{
774 +       struct async_op *areq = req->data;
775 +
776 +       if (err == -EINPROGRESS)
777 +               return;
778 +
779 +       areq->err = err;
780 +       complete(&areq->completion);
781 +}
782 +
783 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
784 +                            unsigned int keylen)
785 +{
786 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
787 +       struct crypto_ahash *auth = ctx->auth;
788 +       struct crypto_skcipher *enc = ctx->enc;
789 +       struct crypto_authenc_keys keys;
790 +       int err = -EINVAL;
791 +
792 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
793 +               goto badkey;
794 +
795 +       crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
796 +       crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
797 +                                   CRYPTO_TFM_REQ_MASK);
798 +       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
799 +       crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
800 +                                      CRYPTO_TFM_RES_MASK);
801 +
802 +       if (err)
803 +               goto out;
804 +
805 +       crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
806 +       crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
807 +                                        CRYPTO_TFM_REQ_MASK);
808 +       err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
809 +       crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
810 +                                      CRYPTO_TFM_RES_MASK);
811 +
812 +out:
813 +       return err;
814 +
815 +badkey:
816 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
817 +       goto out;
818 +}
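
[Editor's sketch] crypto_authenc_extractkeys() expects the same packed blob as the authenc template: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption key length, followed by the authentication key and then the encryption key. A sketch of building such a blob (the helper itself is illustrative; the macros come from <linux/rtnetlink.h> and the param struct from <crypto/authenc.h>):

    static unsigned int tls_build_authenc_key(u8 *blob,
                                              const u8 *authkey,
                                              unsigned int authkeylen,
                                              const u8 *enckey,
                                              unsigned int enckeylen)
    {
            struct rtattr *rta = (struct rtattr *)blob;
            struct crypto_authenc_key_param *param;

            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(enckeylen);

            memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
            memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
                   enckey, enckeylen);

            /* yields the klen = 8 + 20 + 16 layout used by the test vectors */
            return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
    }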
819 +
820 +/**
821 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
822 + * @hash:      (output) buffer to save the digest into
823 + * @src:       (input) scatterlist with the assoc and payload data
824 + * @srclen:    (input) size of the source buffer (assoclen + cryptlen)
825 + * @req:       (input) aead request
826 + **/
827 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
828 +                            unsigned int srclen, struct aead_request *req)
829 +{
830 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
831 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
832 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
833 +       struct async_op ahash_op;
834 +       struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
835 +       unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
836 +       int err = -EBADMSG;
837 +
838 +       /* Bail out if the request assoc len is 0 */
839 +       if (!req->assoclen)
840 +               return err;
841 +
842 +       init_completion(&ahash_op.completion);
843 +
844 +       /* the hash transform to be executed comes from the original request */
845 +       ahash_request_set_tfm(ahreq, ctx->auth);
846 +       /* prepare the hash request with input data and result pointer */
847 +       ahash_request_set_crypt(ahreq, src, hash, srclen);
848 +       /* set the notifier for when the async hash function returns */
849 +       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
850 +                                  tls_async_op_done, &ahash_op);
851 +
852 +       /* Calculate the digest on the given data. The result is put in hash */
853 +       err = crypto_ahash_digest(ahreq);
854 +       if (err == -EINPROGRESS) {
855 +               err = wait_for_completion_interruptible(&ahash_op.completion);
856 +               if (!err)
857 +                       err = ahash_op.err;
858 +       }
859 +
860 +       return err;
861 +}
862 +
863 +/**
864 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
865 + * @hash:      (output) buffer to save the digest and padding into
866 + * @phashlen:  (output) the size of digest + padding
867 + * @req:       (input) aead request
868 + **/
869 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
870 +                                struct aead_request *req)
871 +{
872 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
873 +       unsigned int hash_size = crypto_aead_authsize(tls);
874 +       unsigned int block_size = crypto_aead_blocksize(tls);
875 +       unsigned int srclen = req->cryptlen + hash_size;
876 +       unsigned int icvlen = req->cryptlen + req->assoclen;
877 +       unsigned int padlen;
878 +       int err;
879 +
880 +       err = crypto_tls_genicv(hash, req->src, icvlen, req);
881 +       if (err)
882 +               goto out;
883 +
884 +       /* add padding after digest */
885 +       padlen = block_size - (srclen % block_size);
886 +       memset(hash + hash_size, padlen - 1, padlen);
887 +
888 +       *phashlen = hash_size + padlen;
889 +out:
890 +       return err;
891 +}
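
[Editor's note] As a concrete check against the first encryption test vector: a 16-byte payload plus the 20-byte SHA-1 digest gives srclen = 36, so with the 16-byte AES block size padlen = 16 - (36 % 16) = 12, and twelve bytes of value 0x0b (padlen - 1, since the TLS padding-length byte counts itself) are appended, which is exactly why that vector's rlen is 16 + 20 + 12.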
892 +
893 +static int crypto_tls_copy_data(struct aead_request *req,
894 +                               struct scatterlist *src,
895 +                               struct scatterlist *dst,
896 +                               unsigned int len)
897 +{
898 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
899 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
900 +       SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
901 +
902 +       skcipher_request_set_tfm(skreq, ctx->null);
903 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
904 +                                     NULL, NULL);
905 +       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
906 +
907 +       return crypto_skcipher_encrypt(skreq);
908 +}
909 +
910 +static int crypto_tls_encrypt(struct aead_request *req)
911 +{
912 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
913 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
914 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
915 +       struct skcipher_request *skreq;
916 +       struct scatterlist *cipher = treq_ctx->cipher;
917 +       struct scatterlist *tmp = treq_ctx->tmp;
918 +       struct scatterlist *sg, *src, *dst;
919 +       unsigned int cryptlen, phashlen;
920 +       u8 *hash = treq_ctx->tail;
921 +       int err;
922 +
923 +       /*
924 +        * The hash result is saved at the beginning of the tls request ctx
925 +        * and is aligned as required by the hash transform. Enough space was
926 +        * allocated in crypto_tls_init_tfm to accommodate the difference. The
927 +        * requests themselves start later at treq_ctx->tail + ctx->reqoff so
928 +        * the result is not overwritten by the second (cipher) request.
929 +        */
930 +       hash = (u8 *)ALIGN((unsigned long)hash +
931 +                          crypto_ahash_alignmask(ctx->auth),
932 +                          crypto_ahash_alignmask(ctx->auth) + 1);
933 +
934 +       /*
935 +        * STEP 1: create ICV together with necessary padding
936 +        */
937 +       err = crypto_tls_gen_padicv(hash, &phashlen, req);
938 +       if (err)
939 +               return err;
940 +
941 +       /*
942 +        * STEP 2: Hash and padding are combined with the payload
943 +        * depending on the form in which it arrives. Scatter tables must have at least
944 +        * one page of data before chaining with another table and can't have
945 +        * an empty data page. The following code addresses these requirements.
946 +        *
947 +        * If the payload is empty, only the hash is encrypted, otherwise the
948 +        * payload scatterlist is merged with the hash. A special merging case
949 +        * is when the payload has only one page of data. In that case the
950 +        * payload page is moved to another scatterlist and prepared there for
951 +        * encryption.
952 +        */
953 +       if (req->cryptlen) {
954 +               src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
955 +
956 +               sg_init_table(cipher, 2);
957 +               sg_set_buf(cipher + 1, hash, phashlen);
958 +
959 +               if (sg_is_last(src)) {
960 +                       sg_set_page(cipher, sg_page(src), req->cryptlen,
961 +                                   src->offset);
962 +                       src = cipher;
963 +               } else {
964 +                       unsigned int rem_len = req->cryptlen;
965 +
966 +                       for (sg = src; rem_len > sg->length; sg = sg_next(sg))
967 +                               rem_len -= min(rem_len, sg->length);
968 +
969 +                       sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
970 +                       sg_chain(sg, 1, cipher);
971 +               }
972 +       } else {
973 +               sg_init_one(cipher, hash, phashlen);
974 +               src = cipher;
975 +       }
976 +
977 +       /*
978 +        * If src != dst, copy the associated data from source to destination.
979 +        * In both cases, fast-forward past the associated data in the dest.
980 +        */
981 +       if (req->src != req->dst) {
982 +               err = crypto_tls_copy_data(req, req->src, req->dst,
983 +                                          req->assoclen);
984 +               if (err)
985 +                       return err;
986 +       }
987 +       dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
988 +
989 +       /*
990 +        * STEP 3: encrypt the frame and return the result
991 +        */
992 +       cryptlen = req->cryptlen + phashlen;
993 +
994 +       /*
995 +        * The hash and the cipher are applied at different times and their
996 +        * requests can use the same memory space without interference
997 +        */
998 +       skreq = (void *)(treq_ctx->tail + ctx->reqoff);
999 +       skcipher_request_set_tfm(skreq, ctx->enc);
1000 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1001 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
1002 +                                     req->base.complete, req->base.data);
1003 +       /*
1004 +        * Apply the cipher transform. The result will be in req->dst when the
1005 +        * asynchronous call terminates
1006 +        */
1007 +       err = crypto_skcipher_encrypt(skreq);
1008 +
1009 +       return err;
1010 +}
1011 +
1012 +static int crypto_tls_decrypt(struct aead_request *req)
1013 +{
1014 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
1015 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
1016 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
1017 +       unsigned int cryptlen = req->cryptlen;
1018 +       unsigned int hash_size = crypto_aead_authsize(tls);
1019 +       unsigned int block_size = crypto_aead_blocksize(tls);
1020 +       struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1021 +       struct scatterlist *tmp = treq_ctx->tmp;
1022 +       struct scatterlist *src, *dst;
1023 +
1024 +       u8 padding[255]; /* padding can be 0-255 bytes */
1025 +       u8 pad_size;
1026 +       u16 *len_field;
1027 +       u8 *ihash, *hash = treq_ctx->tail;
1028 +
1029 +       int paderr = 0;
1030 +       int err = -EINVAL;
1031 +       int i;
1032 +       struct async_op ciph_op;
1033 +
1034 +       /*
1035 +        * Rule out bad packets. The input packet length must be at least one
1036 +        * byte more than the hash_size
1037 +        */
1038 +       if (cryptlen <= hash_size || cryptlen % block_size)
1039 +               goto out;
1040 +
1041 +       /*
1042 +        * Step 1 - Decrypt the source. Fast-forward past the associated data
1043 +        * to the encrypted data. The result will be overwritten in place so
1044 +        * that the decrypted data will be adjacent to the associated data. The
1045 +        * last step (computing the hash) will have its input data already
1046 +        * prepared and ready to be accessed at req->src.
1047 +        */
1048 +       src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1049 +       dst = src;
1050 +
1051 +       init_completion(&ciph_op.completion);
1052 +       skcipher_request_set_tfm(skreq, ctx->enc);
1053 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
1054 +                                     tls_async_op_done, &ciph_op);
1055 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1056 +       err = crypto_skcipher_decrypt(skreq);
1057 +       if (err == -EINPROGRESS) {
1058 +               err = wait_for_completion_interruptible(&ciph_op.completion);
1059 +               if (!err)
1060 +                       err = ciph_op.err;
1061 +       }
1062 +       if (err)
1063 +               goto out;
1064 +
1065 +       /*
1066 +        * Step 2 - Verify padding
1067 +        * Retrieve the last byte of the payload; this is the padding size.
1068 +        */
1069 +       cryptlen -= 1;
1070 +       scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1071 +
1072 +       /* RFC recommendation for invalid padding size. */
1073 +       if (cryptlen < pad_size + hash_size) {
1074 +               pad_size = 0;
1075 +               paderr = -EBADMSG;
1076 +       }
1077 +       cryptlen -= pad_size;
1078 +       scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1079 +
1080 +       /* Padding content must be equal to pad_size. We verify all of it */
1081 +       for (i = 0; i < pad_size; i++)
1082 +               if (padding[i] != pad_size)
1083 +                       paderr = -EBADMSG;
1084 +
1085 +       /*
1086 +        * Step 3 - Verify hash
1087 +        * Align the digest result as required by the hash transform. Enough
1088 +        * space was allocated in crypto_tls_init_tfm
1089 +        */
1090 +       hash = (u8 *)ALIGN((unsigned long)hash +
1091 +                          crypto_ahash_alignmask(ctx->auth),
1092 +                          crypto_ahash_alignmask(ctx->auth) + 1);
1093 +       /*
1094 +        * The last two bytes of the associated data make up the length field.
1095 +        * It must be updated with the length of the cleartext message before
1096 +        * the hash is calculated.
1097 +        */
1098 +       len_field = sg_virt(req->src) + req->assoclen - 2;
1099 +       cryptlen -= hash_size;
1100 +       *len_field = htons(cryptlen);
1101 +
1102 +       /* This is the hash from the decrypted packet. Save it for later */
1103 +       ihash = hash + hash_size;
1104 +       scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1105 +
1106 +       /* Now compute and compare our ICV with the one from the packet */
1107 +       err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1108 +       if (!err)
1109 +               err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1110 +
1111 +       if (req->src != req->dst) {
1112 +               err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1113 +                                          req->assoclen);
1114 +               if (err)
1115 +                       goto out;
1116 +       }
1117 +
1118 +       /* return the first found error */
1119 +       if (paderr)
1120 +               err = paderr;
1121 +
1122 +out:
1123 +       aead_request_complete(req, err);
1124 +       return err;
1125 +}
1126 +
1127 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1128 +{
1129 +       struct aead_instance *inst = aead_alg_instance(tfm);
1130 +       struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1131 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1132 +       struct crypto_ahash *auth;
1133 +       struct crypto_skcipher *enc;
1134 +       struct crypto_skcipher *null;
1135 +       int err;
1136 +
1137 +       auth = crypto_spawn_ahash(&ictx->auth);
1138 +       if (IS_ERR(auth))
1139 +               return PTR_ERR(auth);
1140 +
1141 +       enc = crypto_spawn_skcipher(&ictx->enc);
1142 +       err = PTR_ERR(enc);
1143 +       if (IS_ERR(enc))
1144 +               goto err_free_ahash;
1145 +
1146 +       null = crypto_get_default_null_skcipher2();
1147 +       err = PTR_ERR(null);
1148 +       if (IS_ERR(null))
1149 +               goto err_free_skcipher;
1150 +
1151 +       ctx->auth = auth;
1152 +       ctx->enc = enc;
1153 +       ctx->null = null;
1154 +
1155 +       /*
1156 +        * Allow enough space for two digests. The two digests will be compared
1157 +        * during the decryption phase. One will come from the decrypted packet
1158 +        * and the other will be calculated. For encryption, one digest is
1159 +        * padded (up to a cipher blocksize) and chained with the payload
1160 +        */
1161 +       ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1162 +                           crypto_ahash_alignmask(auth),
1163 +                           crypto_ahash_alignmask(auth) + 1) +
1164 +                           max(crypto_ahash_digestsize(auth),
1165 +                               crypto_skcipher_blocksize(enc));
1166 +
1167 +       crypto_aead_set_reqsize(tfm,
1168 +                               sizeof(struct tls_request_ctx) +
1169 +                               ctx->reqoff +
1170 +                               max_t(unsigned int,
1171 +                                     crypto_ahash_reqsize(auth) +
1172 +                                     sizeof(struct ahash_request),
1173 +                                     crypto_skcipher_reqsize(enc) +
1174 +                                     sizeof(struct skcipher_request)));
1175 +
1176 +       return 0;
1177 +
1178 +err_free_skcipher:
1179 +       crypto_free_skcipher(enc);
1180 +err_free_ahash:
1181 +       crypto_free_ahash(auth);
1182 +       return err;
1183 +}
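
[Editor's sketch] A rough picture of the per-request area sized above (widths not to scale; this is an editor's reading of the code, not part of the patch):

    treq_ctx->tail
    |<--------------------- ctx->reqoff --------------------->|
    +--------------------+-------------------------------------+--------------------+
    | aligned space for  | max(digestsize, enc blocksize):     | ahash_request or   |
    | the first digest   | 2nd digest (dec) / padding (enc)    | skcipher_request   |
    +--------------------+-------------------------------------+--------------------+

The hash and cipher requests share the final region because, as noted in crypto_tls_encrypt(), the two transforms are never in flight at the same time.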
1184 +
1185 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1186 +{
1187 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1188 +
1189 +       crypto_free_ahash(ctx->auth);
1190 +       crypto_free_skcipher(ctx->enc);
1191 +       crypto_put_default_null_skcipher2();
1192 +}
1193 +
1194 +static void crypto_tls_free(struct aead_instance *inst)
1195 +{
1196 +       struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1197 +
1198 +       crypto_drop_skcipher(&ctx->enc);
1199 +       crypto_drop_ahash(&ctx->auth);
1200 +       kfree(inst);
1201 +}
1202 +
1203 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1204 +{
1205 +       struct crypto_attr_type *algt;
1206 +       struct aead_instance *inst;
1207 +       struct hash_alg_common *auth;
1208 +       struct crypto_alg *auth_base;
1209 +       struct skcipher_alg *enc;
1210 +       struct tls_instance_ctx *ctx;
1211 +       const char *enc_name;
1212 +       int err;
1213 +
1214 +       algt = crypto_get_attr_type(tb);
1215 +       if (IS_ERR(algt))
1216 +               return PTR_ERR(algt);
1217 +
1218 +       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1219 +               return -EINVAL;
1220 +
1221 +       auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1222 +                             CRYPTO_ALG_TYPE_AHASH_MASK |
1223 +                             crypto_requires_sync(algt->type, algt->mask));
1224 +       if (IS_ERR(auth))
1225 +               return PTR_ERR(auth);
1226 +
1227 +       auth_base = &auth->base;
1228 +
1229 +       enc_name = crypto_attr_alg_name(tb[2]);
1230 +       err = PTR_ERR(enc_name);
1231 +       if (IS_ERR(enc_name))
1232 +               goto out_put_auth;
1233 +
1234 +       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1235 +       err = -ENOMEM;
1236 +       if (!inst)
1237 +               goto out_put_auth;
1238 +
1239 +       ctx = aead_instance_ctx(inst);
1240 +
1241 +       err = crypto_init_ahash_spawn(&ctx->auth, auth,
1242 +                                     aead_crypto_instance(inst));
1243 +       if (err)
1244 +               goto err_free_inst;
1245 +
1246 +       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1247 +       err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1248 +                                  crypto_requires_sync(algt->type,
1249 +                                                       algt->mask));
1250 +       if (err)
1251 +               goto err_drop_auth;
1252 +
1253 +       enc = crypto_spawn_skcipher_alg(&ctx->enc);
1254 +
1255 +       err = -ENAMETOOLONG;
1256 +       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1257 +                    "tls10(%s,%s)", auth_base->cra_name,
1258 +                    enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1259 +               goto err_drop_enc;
1260 +
1261 +       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1262 +                    "tls10(%s,%s)", auth_base->cra_driver_name,
1263 +                    enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1264 +               goto err_drop_enc;
1265 +
1266 +       inst->alg.base.cra_flags = (auth_base->cra_flags |
1267 +                                       enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1268 +       inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1269 +                                       auth_base->cra_priority;
1270 +       inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1271 +       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1272 +                                       enc->base.cra_alignmask;
1273 +       inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1274 +
1275 +       inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1276 +       inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1277 +       inst->alg.maxauthsize = auth->digestsize;
1278 +
1279 +       inst->alg.init = crypto_tls_init_tfm;
1280 +       inst->alg.exit = crypto_tls_exit_tfm;
1281 +
1282 +       inst->alg.setkey = crypto_tls_setkey;
1283 +       inst->alg.encrypt = crypto_tls_encrypt;
1284 +       inst->alg.decrypt = crypto_tls_decrypt;
1285 +
1286 +       inst->free = crypto_tls_free;
1287 +
1288 +       err = aead_register_instance(tmpl, inst);
1289 +       if (err)
1290 +               goto err_drop_enc;
1291 +
1292 +out:
1293 +       crypto_mod_put(auth_base);
1294 +       return err;
1295 +
1296 +err_drop_enc:
1297 +       crypto_drop_skcipher(&ctx->enc);
1298 +err_drop_auth:
1299 +       crypto_drop_ahash(&ctx->auth);
1300 +err_free_inst:
1301 +       kfree(inst);
1302 +out_put_auth:
1303 +       goto out;
1304 +}
1305 +
1306 +static struct crypto_template crypto_tls_tmpl = {
1307 +       .name = "tls10",
1308 +       .create = crypto_tls_create,
1309 +       .module = THIS_MODULE,
1310 +};
1311 +
1312 +static int __init crypto_tls_module_init(void)
1313 +{
1314 +       return crypto_register_template(&crypto_tls_tmpl);
1315 +}
1316 +
1317 +static void __exit crypto_tls_module_exit(void)
1318 +{
1319 +       crypto_unregister_template(&crypto_tls_tmpl);
1320 +}
1321 +
1322 +module_init(crypto_tls_module_init);
1323 +module_exit(crypto_tls_module_exit);
1324 +
1325 +MODULE_LICENSE("GPL");
1326 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
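
Once crypto_tls_tmpl is registered, the usual AEAD API can instantiate it by name. The following is a minimal sketch, not part of the patch, assuming hmac(sha1) and cbc(aes) are available:

#include <linux/err.h>
#include <crypto/aead.h>

static int tls10_demo(void)
{
	struct crypto_aead *tfm;

	/* resolves through the "tls10" template via crypto_tls_create() */
	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ivsize and maxauthsize are inherited from cbc(aes)/hmac(sha1) */
	pr_info("tls10: ivsize=%u maxauthsize=%u\n",
		crypto_aead_ivsize(tfm),
		crypto_aead_alg(tfm)->maxauthsize);

	crypto_free_aead(tfm);
	return 0;
}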
1327 --- a/drivers/crypto/Makefile
1328 +++ b/drivers/crypto/Makefile
1329 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1330  obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1331  obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1332  obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1333 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1334 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1335  obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1336  obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1337  obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1338 --- a/drivers/crypto/caam/Kconfig
1339 +++ b/drivers/crypto/caam/Kconfig
1340 @@ -1,7 +1,17 @@
1341 +config CRYPTO_DEV_FSL_CAAM_COMMON
1342 +       tristate
1343 +
1344 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1345 +       tristate
1346 +
1347 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1348 +       tristate
1349 +
1350  config CRYPTO_DEV_FSL_CAAM
1351 -       tristate "Freescale CAAM-Multicore driver backend"
1352 +       tristate "Freescale CAAM-Multicore platform driver backend"
1353         depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1354         select SOC_BUS
1355 +       select CRYPTO_DEV_FSL_CAAM_COMMON
1356         help
1357           Enables the driver module for Freescale's Cryptographic Accelerator
1358           and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1359 @@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
1360           To compile this driver as a module, choose M here: the module
1361           will be called caam.
1362  
1363 +if CRYPTO_DEV_FSL_CAAM
1364 +
1365 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1366 +       bool "Enable debug output in CAAM driver"
1367 +       help
1368 +         Selecting this will enable printing of various debug
1369 +         information in the CAAM driver.
1370 +
1371  config CRYPTO_DEV_FSL_CAAM_JR
1372         tristate "Freescale CAAM Job Ring driver backend"
1373 -       depends on CRYPTO_DEV_FSL_CAAM
1374         default y
1375         help
1376           Enables the driver module for Job Rings which are part of
1377 @@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1378           To compile this driver as a module, choose M here: the module
1379           will be called caam_jr.
1380  
1381 +if CRYPTO_DEV_FSL_CAAM_JR
1382 +
1383  config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1384         int "Job Ring size"
1385 -       depends on CRYPTO_DEV_FSL_CAAM_JR
1386         range 2 9
1387         default "9"
1388         help
1389 @@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1390  
1391  config CRYPTO_DEV_FSL_CAAM_INTC
1392         bool "Job Ring interrupt coalescing"
1393 -       depends on CRYPTO_DEV_FSL_CAAM_JR
1394         help
1395           Enable the Job Ring's interrupt coalescing feature.
1396  
1397 @@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1398           threshold. Range is 1-65535.
1399  
1400  config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1401 -       tristate "Register algorithm implementations with the Crypto API"
1402 -       depends on CRYPTO_DEV_FSL_CAAM_JR
1403 +       bool "Register algorithm implementations with the Crypto API"
1404         default y
1405 +       select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1406         select CRYPTO_AEAD
1407         select CRYPTO_AUTHENC
1408         select CRYPTO_BLKCIPHER
1409 @@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1410           scatterlist crypto API (such as the linux native IPSec
1411           stack) to the SEC4 via job ring.
1412  
1413 -         To compile this as a module, choose M here: the module
1414 -         will be called caamalg.
1415 -
1416  config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1417 -       tristate "Queue Interface as Crypto API backend"
1418 -       depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1419 +       bool "Queue Interface as Crypto API backend"
1420 +       depends on FSL_SDK_DPA && NET
1421         default y
1422 +       select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1423         select CRYPTO_AUTHENC
1424         select CRYPTO_BLKCIPHER
1425         help
1426 @@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1427           assigned to the kernel should also be more than the number of
1428           job rings.
1429  
1430 -         To compile this as a module, choose M here: the module
1431 -         will be called caamalg_qi.
1432 -
1433  config CRYPTO_DEV_FSL_CAAM_AHASH_API
1434 -       tristate "Register hash algorithm implementations with Crypto API"
1435 -       depends on CRYPTO_DEV_FSL_CAAM_JR
1436 +       bool "Register hash algorithm implementations with Crypto API"
1437         default y
1438 +       select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1439         select CRYPTO_HASH
1440         help
1441           Selecting this will offload ahash for users of the
1442           scatterlist crypto API to the SEC4 via job ring.
1443  
1444 -         To compile this as a module, choose M here: the module
1445 -         will be called caamhash.
1446 -
1447  config CRYPTO_DEV_FSL_CAAM_PKC_API
1448 -        tristate "Register public key cryptography implementations with Crypto API"
1449 -        depends on CRYPTO_DEV_FSL_CAAM_JR
1450 +        bool "Register public key cryptography implementations with Crypto API"
1451          default y
1452          select CRYPTO_RSA
1453          help
1454            Selecting this will allow SEC Public key support for RSA.
1455            Supported cryptographic primitives: encryption, decryption,
1456            signature and verification.
1457 -          To compile this as a module, choose M here: the module
1458 -          will be called caam_pkc.
1459  
1460  config CRYPTO_DEV_FSL_CAAM_RNG_API
1461 -       tristate "Register caam device for hwrng API"
1462 -       depends on CRYPTO_DEV_FSL_CAAM_JR
1463 +       bool "Register caam device for hwrng API"
1464         default y
1465         select CRYPTO_RNG
1466         select HW_RANDOM
1467 @@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1468           Selecting this will register the SEC4 hardware rng to
1469           the hw_random API for suppying the kernel entropy pool.
1470  
1471 -         To compile this as a module, choose M here: the module
1472 -         will be called caamrng.
1473 +endif # CRYPTO_DEV_FSL_CAAM_JR
1474  
1475 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1476 -       bool "Enable debug output in CAAM driver"
1477 -       depends on CRYPTO_DEV_FSL_CAAM
1478 -       help
1479 -         Selecting this will enable printing of various debug
1480 -         information in the CAAM driver.
1481 +endif # CRYPTO_DEV_FSL_CAAM
1482  
1483 -config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1484 -       def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1485 -                     CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1486 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1487 +       tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1488 +       depends on FSL_MC_DPIO
1489 +       select CRYPTO_DEV_FSL_CAAM_COMMON
1490 +       select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1491 +       select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1492 +       select CRYPTO_BLKCIPHER
1493 +       select CRYPTO_AUTHENC
1494 +       select CRYPTO_AEAD
1495 +       select CRYPTO_HASH
1496 +       ---help---
1497 +         CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1498 +         It handles DPSECI DPAA2 objects that sit on the Management Complex
1499 +         (MC) fsl-mc bus.
1500 +
1501 +         To compile this as a module, choose M here: the module
1502 +         will be called dpaa2_caam.
1503 --- a/drivers/crypto/caam/Makefile
1504 +++ b/drivers/crypto/caam/Makefile
1505 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1506         ccflags-y := -DDEBUG
1507  endif
1508  
1509 +ccflags-y += -DVERSION=\"\"
1510 +
1511 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1512  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1513  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1514 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1515 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1516  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1517 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1518 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1519 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1520 -
1521 -caam-objs := ctrl.o
1522 -caam_jr-objs := jr.o key_gen.o error.o
1523 -caam_pkc-y := caampkc.o pkc_desc.o
1524 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1525 +
1526 +caam-y := ctrl.o
1527 +caam_jr-y := jr.o key_gen.o
1528 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1529 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1530 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1531 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1532 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
1533 +
1534 +caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
1535  ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1536         ccflags-y += -DCONFIG_CAAM_QI
1537 -       caam-objs += qi.o
1538  endif
1539 +
1540 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1541 +
1542 +dpaa2_caam-y    := caamalg_qi2.o dpseci.o
1543 --- a/drivers/crypto/caam/caamalg.c
1544 +++ b/drivers/crypto/caam/caamalg.c
1545 @@ -71,6 +71,8 @@
1546  #define AUTHENC_DESC_JOB_IO_LEN                (AEAD_DESC_JOB_IO_LEN + \
1547                                          CAAM_CMD_SZ * 5)
1548  
1549 +#define CHACHAPOLY_DESC_JOB_IO_LEN     (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
1550 +
1551  #define DESC_MAX_USED_BYTES            (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
1552  #define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
1553  
1554 @@ -108,6 +110,7 @@ struct caam_ctx {
1555         dma_addr_t sh_desc_dec_dma;
1556         dma_addr_t sh_desc_givenc_dma;
1557         dma_addr_t key_dma;
1558 +       enum dma_data_direction dir;
1559         struct device *jrdev;
1560         struct alginfo adata;
1561         struct alginfo cdata;
1562 @@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
1563  {
1564         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1565         struct device *jrdev = ctx->jrdev;
1566 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1567         u32 *desc;
1568         int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1569                         ctx->adata.keylen_pad;
1570 @@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
1571  
1572         /* aead_encrypt shared descriptor */
1573         desc = ctx->sh_desc_enc;
1574 -       cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1575 +       cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1576 +                                   ctrlpriv->era);
1577         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1578 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1579 +                                  desc_bytes(desc), ctx->dir);
1580  
1581         /*
1582          * Job Descriptor and Shared Descriptors
1583 @@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
1584  
1585         /* aead_decrypt shared descriptor */
1586         desc = ctx->sh_desc_dec;
1587 -       cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1588 +       cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1589 +                                   ctrlpriv->era);
1590         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1591 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1592 +                                  desc_bytes(desc), ctx->dir);
1593  
1594         return 0;
1595  }
1596 @@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
1597         unsigned int ivsize = crypto_aead_ivsize(aead);
1598         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599         struct device *jrdev = ctx->jrdev;
1600 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1601         u32 ctx1_iv_off = 0;
1602         u32 *desc, *nonce = NULL;
1603         u32 inl_mask;
1604 @@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
1605         desc = ctx->sh_desc_enc;
1606         cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1607                                ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1608 -                              false);
1609 +                              false, ctrlpriv->era);
1610         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1611 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1612 +                                  desc_bytes(desc), ctx->dir);
1613  
1614  skip_enc:
1615         /*
1616 @@ -266,9 +273,9 @@ skip_enc:
1617         desc = ctx->sh_desc_dec;
1618         cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1619                                ctx->authsize, alg->caam.geniv, is_rfc3686,
1620 -                              nonce, ctx1_iv_off, false);
1621 +                              nonce, ctx1_iv_off, false, ctrlpriv->era);
1622         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1623 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1624 +                                  desc_bytes(desc), ctx->dir);
1625  
1626         if (!alg->caam.geniv)
1627                 goto skip_givenc;
1628 @@ -300,9 +307,9 @@ skip_enc:
1629         desc = ctx->sh_desc_enc;
1630         cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1631                                   ctx->authsize, is_rfc3686, nonce,
1632 -                                 ctx1_iv_off, false);
1633 +                                 ctx1_iv_off, false, ctrlpriv->era);
1634         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1635 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1636 +                                  desc_bytes(desc), ctx->dir);
1637  
1638  skip_givenc:
1639         return 0;
1640 @@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
1641  {
1642         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1643         struct device *jrdev = ctx->jrdev;
1644 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1645         u32 *desc;
1646         int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1647                         ctx->cdata.keylen;
1648 @@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
1649         }
1650  
1651         desc = ctx->sh_desc_enc;
1652 -       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1653 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1654         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1655 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1656 +                                  desc_bytes(desc), ctx->dir);
1657  
1658         /*
1659          * Job Descriptor and Shared Descriptors
1660 @@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
1661         }
1662  
1663         desc = ctx->sh_desc_dec;
1664 -       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1665 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1666         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1667 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1668 +                                  desc_bytes(desc), ctx->dir);
1669  
1670         return 0;
1671  }
1672 @@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
1673  {
1674         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1675         struct device *jrdev = ctx->jrdev;
1676 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1677         u32 *desc;
1678         int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1679                         ctx->cdata.keylen;
1680 @@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
1681         }
1682  
1683         desc = ctx->sh_desc_enc;
1684 -       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1685 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1686 +                                 false);
1687         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1688 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1689 +                                  desc_bytes(desc), ctx->dir);
1690  
1691         /*
1692          * Job Descriptor and Shared Descriptors
1693 @@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
1694         }
1695  
1696         desc = ctx->sh_desc_dec;
1697 -       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1698 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1699 +                                 false);
1700         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1701 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1702 +                                  desc_bytes(desc), ctx->dir);
1703  
1704         return 0;
1705  }
1706 @@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
1707  {
1708         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1709         struct device *jrdev = ctx->jrdev;
1710 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1711         u32 *desc;
1712         int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1713                         ctx->cdata.keylen;
1714 @@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
1715         }
1716  
1717         desc = ctx->sh_desc_enc;
1718 -       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1719 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1720 +                                 false);
1721         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1722 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1723 +                                  desc_bytes(desc), ctx->dir);
1724  
1725         /*
1726          * Job Descriptor and Shared Descriptors
1727 @@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
1728         }
1729  
1730         desc = ctx->sh_desc_dec;
1731 -       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1732 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1733 +                                 false);
1734         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1735 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1736 +                                  desc_bytes(desc), ctx->dir);
1737  
1738         return 0;
1739  }
1740 @@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
1741         return 0;
1742  }
1743  
1744 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1745 +{
1746 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
1747 +       struct device *jrdev = ctx->jrdev;
1748 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1749 +       u32 *desc;
1750 +
1751 +       if (!ctx->cdata.keylen || !ctx->authsize)
1752 +               return 0;
1753 +
1754 +       desc = ctx->sh_desc_enc;
1755 +       cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1756 +                              ctx->authsize, true, false);
1757 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1758 +                                  desc_bytes(desc), ctx->dir);
1759 +
1760 +       desc = ctx->sh_desc_dec;
1761 +       cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1762 +                              ctx->authsize, false, false);
1763 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1764 +                                  desc_bytes(desc), ctx->dir);
1765 +
1766 +       return 0;
1767 +}
1768 +
1769 +static int chachapoly_setauthsize(struct crypto_aead *aead,
1770 +                                 unsigned int authsize)
1771 +{
1772 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
1773 +
1774 +       if (authsize != POLY1305_DIGEST_SIZE)
1775 +               return -EINVAL;
1776 +
1777 +       ctx->authsize = authsize;
1778 +       return chachapoly_set_sh_desc(aead);
1779 +}
1780 +
1781 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1782 +                            unsigned int keylen)
1783 +{
1784 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
1785 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1786 +       unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
1787 +
1788 +       if (keylen != CHACHA20_KEY_SIZE + saltlen) {
1789 +               crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1790 +               return -EINVAL;
1791 +       }
1792 +
1793 +       ctx->cdata.key_virt = key;
1794 +       ctx->cdata.keylen = keylen - saltlen;
1795 +
1796 +       return chachapoly_set_sh_desc(aead);
1797 +}
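
A note on the key format chachapoly_setkey() enforces: saltlen is CHACHAPOLY_IV_SIZE minus the template ivsize, so the rfc7539 variant (12-byte IV) takes a bare 32-byte ChaCha20 key, while the rfc7539esp variant (8-byte IV) expects a 4-byte salt appended to it. A minimal sketch of the ESP case, with placeholder key material and tfm assumed to be an rfc7539esp(chacha20,poly1305) AEAD:

	u8 key[CHACHA20_KEY_SIZE + 4];	/* 32-byte key + 4-byte salt */
	int err;

	get_random_bytes(key, sizeof(key));	/* placeholder only */
	err = crypto_aead_setkey(tfm, key, sizeof(key));	/* keylen = 36 */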
1798 +
1799  static int aead_setkey(struct crypto_aead *aead,
1800                                const u8 *key, unsigned int keylen)
1801  {
1802         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1803         struct device *jrdev = ctx->jrdev;
1804 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1805         struct crypto_authenc_keys keys;
1806         int ret = 0;
1807  
1808 @@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
1809                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1810  #endif
1811  
1812 +       /*
1813 +        * If DKP is supported, use it in the shared descriptor to generate
1814 +        * the split key.
1815 +        */
1816 +       if (ctrlpriv->era >= 6) {
1817 +               ctx->adata.keylen = keys.authkeylen;
1818 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1819 +                                                     OP_ALG_ALGSEL_MASK);
1820 +
1821 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1822 +                       goto badkey;
1823 +
1824 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
1825 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1826 +                      keys.enckeylen);
1827 +               dma_sync_single_for_device(jrdev, ctx->key_dma,
1828 +                                          ctx->adata.keylen_pad +
1829 +                                          keys.enckeylen, ctx->dir);
1830 +               goto skip_split_key;
1831 +       }
1832 +
1833         ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1834                             keys.authkeylen, CAAM_MAX_KEY_SIZE -
1835                             keys.enckeylen);
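
The era >= 6 branch above is the Derived Key Protocol (DKP) path: the raw HMAC key is stored as-is and the shared descriptor derives the split key in place, instead of running a gen_split_key() job first. This is also why such contexts are mapped DMA_BIDIRECTIONAL in caam_init_common() further down. A sketch of the resulting key buffer layout, assuming the usual CAAM split-key sizing (two internal digest states, e.g. 2 * 32 bytes for hmac(sha256)):

	/*
	 * ctx->key, era >= 6, before the descriptor runs:
	 *	[ raw authkey, padded to keylen_pad | enckey ]
	 * after DKP executes, the first keylen_pad bytes hold the
	 * derived MDHA split key instead.
	 */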
1836 @@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
1837         /* append the encryption key after the auth split key */
1838         memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1839         dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1840 -                                  keys.enckeylen, DMA_TO_DEVICE);
1841 +                                  keys.enckeylen, ctx->dir);
1842  #ifdef DEBUG
1843         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1844                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1845                        ctx->adata.keylen_pad + keys.enckeylen, 1);
1846  #endif
1847 +
1848 +skip_split_key:
1849         ctx->cdata.keylen = keys.enckeylen;
1850         return aead_set_sh_desc(aead);
1851  badkey:
1852 @@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
1853  #endif
1854  
1855         memcpy(ctx->key, key, keylen);
1856 -       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1857 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1858         ctx->cdata.keylen = keylen;
1859  
1860         return gcm_set_sh_desc(aead);
1861 @@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
1862          */
1863         ctx->cdata.keylen = keylen - 4;
1864         dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1865 -                                  DMA_TO_DEVICE);
1866 +                                  ctx->dir);
1867         return rfc4106_set_sh_desc(aead);
1868  }
1869  
1870 @@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
1871          */
1872         ctx->cdata.keylen = keylen - 4;
1873         dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1874 -                                  DMA_TO_DEVICE);
1875 +                                  ctx->dir);
1876         return rfc4543_set_sh_desc(aead);
1877  }
1878  
1879 @@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
1880         cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1881                                      ctx1_iv_off);
1882         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1883 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1884 +                                  desc_bytes(desc), ctx->dir);
1885  
1886         /* ablkcipher_decrypt shared descriptor */
1887         desc = ctx->sh_desc_dec;
1888         cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1889                                      ctx1_iv_off);
1890         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1891 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1892 +                                  desc_bytes(desc), ctx->dir);
1893  
1894         /* ablkcipher_givencrypt shared descriptor */
1895         desc = ctx->sh_desc_givenc;
1896         cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1897                                         ctx1_iv_off);
1898         dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1899 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1900 +                                  desc_bytes(desc), ctx->dir);
1901  
1902         return 0;
1903  }
1904 @@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
1905         desc = ctx->sh_desc_enc;
1906         cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1907         dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1908 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1909 +                                  desc_bytes(desc), ctx->dir);
1910  
1911         /* xts_ablkcipher_decrypt shared descriptor */
1912         desc = ctx->sh_desc_dec;
1913         cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1914         dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1915 -                                  desc_bytes(desc), DMA_TO_DEVICE);
1916 +                                  desc_bytes(desc), ctx->dir);
1917  
1918         return 0;
1919  }
1920 @@ -987,9 +1080,6 @@ static void init_aead_job(struct aead_re
1921                 append_seq_out_ptr(desc, dst_dma,
1922                                    req->assoclen + req->cryptlen - authsize,
1923                                    out_options);
1924 -
1925 -       /* REG3 = assoclen */
1926 -       append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1927  }
1928  
1929  static void init_gcm_job(struct aead_request *req,
1930 @@ -1004,6 +1094,7 @@ static void init_gcm_job(struct aead_req
1931         unsigned int last;
1932  
1933         init_aead_job(req, edesc, all_contig, encrypt);
1934 +       append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1935  
1936         /* BUG This should not be specific to generic GCM. */
1937         last = 0;
1938 @@ -1021,6 +1112,40 @@ static void init_gcm_job(struct aead_req
1939         /* End of blank commands */
1940  }
1941  
1942 +static void init_chachapoly_job(struct aead_request *req,
1943 +                               struct aead_edesc *edesc, bool all_contig,
1944 +                               bool encrypt)
1945 +{
1946 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
1947 +       unsigned int ivsize = crypto_aead_ivsize(aead);
1948 +       unsigned int assoclen = req->assoclen;
1949 +       u32 *desc = edesc->hw_desc;
1950 +       u32 ctx_iv_off = 4;
1951 +
1952 +       init_aead_job(req, edesc, all_contig, encrypt);
1953 +
1954 +       if (ivsize != CHACHAPOLY_IV_SIZE) {
1955 +               /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1956 +               ctx_iv_off += 4;
1957 +
1958 +               /*
1959 +                * The associated data comes already with the IV but we need
1960 +                * to skip it when we authenticate or encrypt...
1961 +                */
1962 +               assoclen -= ivsize;
1963 +       }
1964 +
1965 +       append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1966 +
1967 +       /*
1968 +        * For IPsec, load the IV at the higher offset in the same register.
1969 +        * For RFC7539, simply load the 12-byte nonce in a single operation.
1970 +        */
1971 +       append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1972 +                          LDST_SRCDST_BYTE_CONTEXT |
1973 +                          ctx_iv_off << LDST_OFFSET_SHIFT);
1974 +}
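
Worked values for the two cases init_chachapoly_job() handles, as a sketch:

	/*
	 * rfc7539:    ivsize == CHACHAPOLY_IV_SIZE (12), ctx_iv_off = 4;
	 *	       the whole nonce is loaded at CONTEXT1 bytes 4..15
	 *	       and req->assoclen is used unchanged.
	 * rfc7539esp: ivsize == 8, ctx_iv_off = 8; the IV lands at
	 *	       CONTEXT1 bytes 8..15, right after the 4-byte salt,
	 *	       and the 8 IV bytes counted in req->assoclen are
	 *	       subtracted before programming REG3.
	 */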
1975 +
1976  static void init_authenc_job(struct aead_request *req,
1977                              struct aead_edesc *edesc,
1978                              bool all_contig, bool encrypt)
1979 @@ -1030,6 +1155,7 @@ static void init_authenc_job(struct aead
1980                                                  struct caam_aead_alg, aead);
1981         unsigned int ivsize = crypto_aead_ivsize(aead);
1982         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1983 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1984         const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1985                                OP_ALG_AAI_CTR_MOD128);
1986         const bool is_rfc3686 = alg->caam.rfc3686;
1987 @@ -1053,6 +1179,15 @@ static void init_authenc_job(struct aead
1988  
1989         init_aead_job(req, edesc, all_contig, encrypt);
1990  
1991 +       /*
1992 +        * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1993 +        * having DPOVRD as destination.
1994 +        */
1995 +       if (ctrlpriv->era < 3)
1996 +               append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1997 +       else
1998 +               append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1999 +
2000         if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2001                 append_load_as_imm(desc, req->iv, ivsize,
2002                                    LDST_CLASS_1_CCB |
2003 @@ -1225,8 +1360,16 @@ static struct aead_edesc *aead_edesc_all
2004                 }
2005         }
2006  
2007 +       /*
2008 +        * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2009 +        * the end of the table by allocating more S/G entries.
2010 +        */
2011         sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
2012 -       sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2013 +       if (mapped_dst_nents > 1)
2014 +               sec4_sg_len += ALIGN(mapped_dst_nents, 4);
2015 +       else
2016 +               sec4_sg_len = ALIGN(sec4_sg_len, 4);
2017 +
2018         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2019  
2020         /* allocate space for base edesc and hw desc commands, link tables */
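
Concrete numbers for the padding arithmetic above (ALIGN() rounds up to a multiple of 4):

	/*
	 * mapped_src_nents = 3, mapped_dst_nents = 1:
	 *	sec4_sg_len = ALIGN(3, 4) = 4 entries
	 * mapped_src_nents = 3, mapped_dst_nents = 5:
	 *	sec4_sg_len = 3 + ALIGN(5, 4) = 11 entries
	 * so a 4-entry burst starting at the last valid entry never
	 * reads past the allocated table.
	 */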
2021 @@ -1307,6 +1450,72 @@ static int gcm_encrypt(struct aead_reque
2022         return ret;
2023  }
2024  
2025 +static int chachapoly_encrypt(struct aead_request *req)
2026 +{
2027 +       struct aead_edesc *edesc;
2028 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
2029 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
2030 +       struct device *jrdev = ctx->jrdev;
2031 +       bool all_contig;
2032 +       u32 *desc;
2033 +       int ret;
2034 +
2035 +       edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2036 +                                true);
2037 +       if (IS_ERR(edesc))
2038 +               return PTR_ERR(edesc);
2039 +
2040 +       desc = edesc->hw_desc;
2041 +
2042 +       init_chachapoly_job(req, edesc, all_contig, true);
2043 +       print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2044 +                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2045 +                            1);
2046 +
2047 +       ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2048 +       if (!ret) {
2049 +               ret = -EINPROGRESS;
2050 +       } else {
2051 +               aead_unmap(jrdev, edesc, req);
2052 +               kfree(edesc);
2053 +       }
2054 +
2055 +       return ret;
2056 +}
2057 +
2058 +static int chachapoly_decrypt(struct aead_request *req)
2059 +{
2060 +       struct aead_edesc *edesc;
2061 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
2062 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
2063 +       struct device *jrdev = ctx->jrdev;
2064 +       bool all_contig;
2065 +       u32 *desc;
2066 +       int ret;
2067 +
2068 +       edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2069 +                                false);
2070 +       if (IS_ERR(edesc))
2071 +               return PTR_ERR(edesc);
2072 +
2073 +       desc = edesc->hw_desc;
2074 +
2075 +       init_chachapoly_job(req, edesc, all_contig, false);
2076 +       print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2077 +                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2078 +                            1);
2079 +
2080 +       ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2081 +       if (!ret) {
2082 +               ret = -EINPROGRESS;
2083 +       } else {
2084 +               aead_unmap(jrdev, edesc, req);
2085 +               kfree(edesc);
2086 +       }
2087 +
2088 +       return ret;
2089 +}
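
Both entry points queue the job through caam_jr_enqueue() and return -EINPROGRESS on success, with the outcome reported via the aead_encrypt_done/aead_decrypt_done callbacks. A caller needing synchronous behaviour can wrap this in a completion, mirroring the pattern testmgr uses in this kernel; names below are illustrative:

	struct demo_result {
		struct completion completion;
		int err;
	};

	static void demo_aead_done(struct crypto_async_request *areq, int err)
	{
		struct demo_result *res = areq->data;

		if (err == -EINPROGRESS)
			return;		/* moved off backlog, keep waiting */
		res->err = err;
		complete(&res->completion);
	}

	/* ... */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  demo_aead_done, &res);
	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res->completion);
		err = res->err;
	}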
2090 +
2091  static int ipsec_gcm_encrypt(struct aead_request *req)
2092  {
2093         if (req->assoclen < 8)
2094 @@ -1494,7 +1703,25 @@ static struct ablkcipher_edesc *ablkciph
2095  
2096         sec4_sg_ents = 1 + mapped_src_nents;
2097         dst_sg_idx = sec4_sg_ents;
2098 -       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2099 +
2100 +       /*
2101 +        * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2102 +        * the end of the table by allocating more S/G entries. Logic:
2103 +        * if (src != dst && output S/G)
2104 +        *      pad output S/G, if needed
2105 +        * else if (src == dst && S/G)
2106 +        *      overlapping S/Gs; pad one of them
2107 +        * else if (input S/G) ...
2108 +        *      pad input S/G, if needed
2109 +        */
2110 +       if (mapped_dst_nents > 1)
2111 +               sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
2112 +       else if ((req->src == req->dst) && (mapped_src_nents > 1))
2113 +               sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
2114 +                                  1 + ALIGN(mapped_src_nents, 4));
2115 +       else
2116 +               sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
2117 +
2118         sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
2119  
2120         /*
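
Worked numbers for the branches above (the leading "1 +" accounts for the IV entry):

	/*
	 * src != dst, mapped_src_nents = 4, mapped_dst_nents = 2:
	 *	sec4_sg_ents = (1 + 4) + ALIGN(2, 4) = 9 entries
	 * src == dst, mapped_src_nents = 4:
	 *	sec4_sg_ents = max(ALIGN(1 + 4, 4), 1 + ALIGN(4, 4))
	 *		     = max(8, 5) = 8 entries
	 * src != dst, mapped_src_nents = 4, mapped_dst_nents = 1:
	 *	sec4_sg_ents = ALIGN(1 + 4, 4) = 8 entries
	 */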
2121 @@ -3196,6 +3423,50 @@ static struct caam_aead_alg driver_aeads
2122                         .geniv = true,
2123                 },
2124         },
2125 +       {
2126 +               .aead = {
2127 +                       .base = {
2128 +                               .cra_name = "rfc7539(chacha20,poly1305)",
2129 +                               .cra_driver_name = "rfc7539-chacha20-poly1305-"
2130 +                                                  "caam",
2131 +                               .cra_blocksize = 1,
2132 +                       },
2133 +                       .setkey = chachapoly_setkey,
2134 +                       .setauthsize = chachapoly_setauthsize,
2135 +                       .encrypt = chachapoly_encrypt,
2136 +                       .decrypt = chachapoly_decrypt,
2137 +                       .ivsize = CHACHAPOLY_IV_SIZE,
2138 +                       .maxauthsize = POLY1305_DIGEST_SIZE,
2139 +               },
2140 +               .caam = {
2141 +                       .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2142 +                                          OP_ALG_AAI_AEAD,
2143 +                       .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2144 +                                          OP_ALG_AAI_AEAD,
2145 +               },
2146 +       },
2147 +       {
2148 +               .aead = {
2149 +                       .base = {
2150 +                               .cra_name = "rfc7539esp(chacha20,poly1305)",
2151 +                               .cra_driver_name = "rfc7539esp-chacha20-"
2152 +                                                  "poly1305-caam",
2153 +                               .cra_blocksize = 1,
2154 +                       },
2155 +                       .setkey = chachapoly_setkey,
2156 +                       .setauthsize = chachapoly_setauthsize,
2157 +                       .encrypt = chachapoly_encrypt,
2158 +                       .decrypt = chachapoly_decrypt,
2159 +                       .ivsize = 8,
2160 +                       .maxauthsize = POLY1305_DIGEST_SIZE,
2161 +               },
2162 +               .caam = {
2163 +                       .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2164 +                                          OP_ALG_AAI_AEAD,
2165 +                       .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2166 +                                          OP_ALG_AAI_AEAD,
2167 +               },
2168 +       },
2169  };
2170  
2171  struct caam_crypto_alg {
2172 @@ -3204,9 +3475,11 @@ struct caam_crypto_alg {
2173         struct caam_alg_entry caam;
2174  };
2175  
2176 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2177 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2178 +                          bool uses_dkp)
2179  {
2180         dma_addr_t dma_addr;
2181 +       struct caam_drv_private *priv;
2182  
2183         ctx->jrdev = caam_jr_alloc();
2184         if (IS_ERR(ctx->jrdev)) {
2185 @@ -3214,10 +3487,16 @@ static int caam_init_common(struct caam_
2186                 return PTR_ERR(ctx->jrdev);
2187         }
2188  
2189 +       priv = dev_get_drvdata(ctx->jrdev->parent);
2190 +       if (priv->era >= 6 && uses_dkp)
2191 +               ctx->dir = DMA_BIDIRECTIONAL;
2192 +       else
2193 +               ctx->dir = DMA_TO_DEVICE;
2194 +
2195         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
2196                                         offsetof(struct caam_ctx,
2197                                                  sh_desc_enc_dma),
2198 -                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2199 +                                       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2200         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
2201                 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
2202                 caam_jr_free(ctx->jrdev);
2203 @@ -3245,7 +3524,7 @@ static int caam_cra_init(struct crypto_t
2204                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2205         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2206  
2207 -       return caam_init_common(ctx, &caam_alg->caam);
2208 +       return caam_init_common(ctx, &caam_alg->caam, false);
2209  }
2210  
2211  static int caam_aead_init(struct crypto_aead *tfm)
2212 @@ -3255,14 +3534,15 @@ static int caam_aead_init(struct crypto_
2213                  container_of(alg, struct caam_aead_alg, aead);
2214         struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2215  
2216 -       return caam_init_common(ctx, &caam_alg->caam);
2217 +       return caam_init_common(ctx, &caam_alg->caam,
2218 +                               alg->setkey == aead_setkey);
2219  }
2220  
2221  static void caam_exit_common(struct caam_ctx *ctx)
2222  {
2223         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
2224                                offsetof(struct caam_ctx, sh_desc_enc_dma),
2225 -                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2226 +                              ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2227         caam_jr_free(ctx->jrdev);
2228  }
2229  
2230 @@ -3276,7 +3556,7 @@ static void caam_aead_exit(struct crypto
2231         caam_exit_common(crypto_aead_ctx(tfm));
2232  }
2233  
2234 -static void __exit caam_algapi_exit(void)
2235 +void caam_algapi_exit(void)
2236  {
2237  
2238         struct caam_crypto_alg *t_alg, *n;
2239 @@ -3355,56 +3635,52 @@ static void caam_aead_alg_init(struct ca
2240         alg->exit = caam_aead_exit;
2241  }
2242  
2243 -static int __init caam_algapi_init(void)
2244 +int caam_algapi_init(struct device *ctrldev)
2245  {
2246 -       struct device_node *dev_node;
2247 -       struct platform_device *pdev;
2248 -       struct device *ctrldev;
2249 -       struct caam_drv_private *priv;
2250 +       struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2251         int i = 0, err = 0;
2252 -       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 +       u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
2254         unsigned int md_limit = SHA512_DIGEST_SIZE;
2255         bool registered = false;
2256  
2257 -       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2258 -       if (!dev_node) {
2259 -               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2260 -               if (!dev_node)
2261 -                       return -ENODEV;
2262 -       }
2263 -
2264 -       pdev = of_find_device_by_node(dev_node);
2265 -       if (!pdev) {
2266 -               of_node_put(dev_node);
2267 -               return -ENODEV;
2268 -       }
2269 -
2270 -       ctrldev = &pdev->dev;
2271 -       priv = dev_get_drvdata(ctrldev);
2272 -       of_node_put(dev_node);
2273 -
2274 -       /*
2275 -        * If priv is NULL, it's probably because the caam driver wasn't
2276 -        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2277 -        */
2278 -       if (!priv)
2279 -               return -ENODEV;
2280 -
2281 -
2282         INIT_LIST_HEAD(&alg_list);
2283  
2284         /*
2285          * Register crypto algorithms the device supports.
2286          * First, detect presence and attributes of DES, AES, and MD blocks.
2287          */
2288 -       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2289 -       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2290 -       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2291 -       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2292 -       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2293 +       if (priv->era < 10) {
2294 +               u32 cha_vid, cha_inst;
2295 +
2296 +               cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2297 +               aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2298 +               md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2299 +
2300 +               cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2301 +               des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2302 +                          CHA_ID_LS_DES_SHIFT;
2303 +               aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2304 +               md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2305 +               ccha_inst = 0;
2306 +               ptha_inst = 0;
2307 +       } else {
2308 +               u32 aesa, mdha;
2309 +
2310 +               aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2311 +               mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2312 +
2313 +               aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2314 +               md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2315 +
2316 +               des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2317 +               aes_inst = aesa & CHA_VER_NUM_MASK;
2318 +               md_inst = mdha & CHA_VER_NUM_MASK;
2319 +               ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
2320 +               ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
2321 +       }
2322  
2323         /* If MD is present, limit digest size based on LP256 */
2324 -       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2325 +       if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2326                 md_limit = SHA256_DIGEST_SIZE;
2327  
2328         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2329 @@ -3426,10 +3702,10 @@ static int __init caam_algapi_init(void)
2330                  * Check support for AES modes not available
2331                  * on LP devices.
2332                  */
2333 -               if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2334 -                       if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2335 -                            OP_ALG_AAI_XTS)
2336 -                               continue;
2337 +               if (aes_vid == CHA_VER_VID_AES_LP &&
2338 +                   (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2339 +                   OP_ALG_AAI_XTS)
2340 +                       continue;
2341  
2342                 t_alg = caam_alg_alloc(alg);
2343                 if (IS_ERR(t_alg)) {
2344 @@ -3468,21 +3744,28 @@ static int __init caam_algapi_init(void)
2345                 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2346                                 continue;
2347  
2348 +               /* Skip CHACHA20 algorithms if not supported by device */
2349 +               if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
2350 +                       continue;
2351 +
2352 +               /* Skip POLY1305 algorithms if not supported by device */
2353 +               if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
2354 +                       continue;
2355 +
2356                 /*
2357                  * Check support for AES algorithms not available
2358                  * on LP devices.
2359                  */
2360 -               if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2361 -                       if (alg_aai == OP_ALG_AAI_GCM)
2362 -                               continue;
2363 +               if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2364 +                       continue;
2365  
2366                 /*
2367                  * Skip algorithms requiring message digests
2368                  * if MD or MD size is not supported by device.
2369                  */
2370 -               if (c2_alg_sel &&
2371 -                   (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2372 -                               continue;
2373 +               if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
2374 +                   (!md_inst || t_alg->aead.maxauthsize > md_limit))
2375 +                       continue;
2376  
2377                 caam_aead_alg_init(t_alg);
2378  
2379 @@ -3502,10 +3785,3 @@ static int __init caam_algapi_init(void)
2380  
2381         return err;
2382  }
2383 -
2384 -module_init(caam_algapi_init);
2385 -module_exit(caam_algapi_exit);
2386 -
2387 -MODULE_LICENSE("GPL");
2388 -MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2389 -MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
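
Dropping module_init()/module_exit() and the MODULE_* tags matches the Makefile hunk earlier in this patch: caamalg.o is now linked into caam_jr.o, so algorithm registration has to be driven from the job-ring driver rather than from module load. The expected wiring, sketched only (the real call sites live elsewhere in this patch):

	/* in the caam_jr probe path, once at least one ring is up */
	err = caam_algapi_init(ctrldev);
	if (err)
		dev_warn(ctrldev, "algapi registration failed: %d\n", err);

	/* and symmetrically on teardown */
	caam_algapi_exit();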
2390 --- a/drivers/crypto/caam/caamalg_desc.c
2391 +++ b/drivers/crypto/caam/caamalg_desc.c
2392 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
2393   * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
2394   *                               (non-protocol) with no (null) encryption.
2395   * @desc: pointer to buffer used for descriptor construction
2396 - * @adata: pointer to authentication transform definitions. Note that since a
2397 - *         split key is to be used, the size of the split key itself is
2398 - *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2399 - *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2400 + * @adata: pointer to authentication transform definitions.
2401 + *         A split key is required for SEC Era < 6; the size of the split key
2402 + *         is specified in this case. Valid algorithm values - one of
2403 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2404 + *         with OP_ALG_AAI_HMAC_PRECOMP.
2405   * @icvsize: integrity check value (ICV) size (truncated or full)
2406 - *
2407 - * Note: Requires an MDHA split key.
2408 + * @era: SEC Era
2409   */
2410  void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2411 -                                unsigned int icvsize)
2412 +                                unsigned int icvsize, int era)
2413  {
2414         u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2415  
2416 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
2417         /* Skip if already shared */
2418         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2419                                    JUMP_COND_SHRD);
2420 -       if (adata->key_inline)
2421 -               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2422 -                                 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
2423 -                                 KEY_ENC);
2424 -       else
2425 -               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2426 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
2427 +       if (era < 6) {
2428 +               if (adata->key_inline)
2429 +                       append_key_as_imm(desc, adata->key_virt,
2430 +                                         adata->keylen_pad, adata->keylen,
2431 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
2432 +                                         KEY_ENC);
2433 +               else
2434 +                       append_key(desc, adata->key_dma, adata->keylen,
2435 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2436 +       } else {
2437 +               append_proto_dkp(desc, adata);
2438 +       }
2439         set_jump_tgt_here(desc, key_jump_cmd);
2440  
2441         /* assoclen + cryptlen = seqinlen */
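
The branch added above changes what the descriptor loads at this point; in sketch form, the two command sequences are:

	/*
	 * Era < 6:  KEY command, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC,
	 *	     carrying a pre-computed, encrypted MDHA split key
	 *	     (inline via append_key_as_imm() or by DMA address).
	 * Era >= 6: append_proto_dkp() emits a Derived Key Protocol
	 *	     operation that expands the raw HMAC key into the
	 *	     split key on the fly, so no gen_split_key() job is
	 *	     needed beforehand.
	 */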
2442 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
2443   * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
2444   *                               (non-protocol) with no (null) decryption.
2445   * @desc: pointer to buffer used for descriptor construction
2446 - * @adata: pointer to authentication transform definitions. Note that since a
2447 - *         split key is to be used, the size of the split key itself is
2448 - *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2449 - *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2450 + * @adata: pointer to authentication transform definitions.
2451 + *         A split key is required for SEC Era < 6; the size of the split key
2452 + *         is specified in this case. Valid algorithm values - one of
2453 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2454 + *         with OP_ALG_AAI_HMAC_PRECOMP.
2455   * @icvsize: integrity check value (ICV) size (truncated or full)
2456 - *
2457 - * Note: Requires an MDHA split key.
2458 + * @era: SEC Era
2459   */
2460  void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2461 -                                unsigned int icvsize)
2462 +                                unsigned int icvsize, int era)
2463  {
2464         u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
2465  
2466 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
2467         /* Skip if already shared */
2468         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2469                                    JUMP_COND_SHRD);
2470 -       if (adata->key_inline)
2471 -               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2472 -                                 adata->keylen, CLASS_2 |
2473 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
2474 -       else
2475 -               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2476 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
2477 +       if (era < 6) {
2478 +               if (adata->key_inline)
2479 +                       append_key_as_imm(desc, adata->key_virt,
2480 +                                         adata->keylen_pad, adata->keylen,
2481 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
2482 +                                         KEY_ENC);
2483 +               else
2484 +                       append_key(desc, adata->key_dma, adata->keylen,
2485 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2486 +       } else {
2487 +               append_proto_dkp(desc, adata);
2488 +       }
2489         set_jump_tgt_here(desc, key_jump_cmd);
2490  
2491         /* Class 2 operation */
2492 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
2493  static void init_sh_desc_key_aead(u32 * const desc,
2494                                   struct alginfo * const cdata,
2495                                   struct alginfo * const adata,
2496 -                                 const bool is_rfc3686, u32 *nonce)
2497 +                                 const bool is_rfc3686, u32 *nonce, int era)
2498  {
2499         u32 *key_jump_cmd;
2500         unsigned int enckeylen = cdata->keylen;
2501 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2502         if (is_rfc3686)
2503                 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2504  
2505 -       if (adata->key_inline)
2506 -               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2507 -                                 adata->keylen, CLASS_2 |
2508 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
2509 -       else
2510 -               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2511 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
2512 +       if (era < 6) {
2513 +               if (adata->key_inline)
2514 +                       append_key_as_imm(desc, adata->key_virt,
2515 +                                         adata->keylen_pad, adata->keylen,
2516 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
2517 +                                         KEY_ENC);
2518 +               else
2519 +                       append_key(desc, adata->key_dma, adata->keylen,
2520 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2521 +       } else {
2522 +               append_proto_dkp(desc, adata);
2523 +       }
2524  
2525         if (cdata->key_inline)
2526                 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2527 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2528   * @cdata: pointer to block cipher transform definitions
2529   *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2530   *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2531 - * @adata: pointer to authentication transform definitions. Note that since a
2532 - *         split key is to be used, the size of the split key itself is
2533 - *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2534 - *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2535 + * @adata: pointer to authentication transform definitions.
2536 + *         A split key is required for SEC Era < 6; the size of the split key
2537 + *         is specified in this case. Valid algorithm values - one of
2538 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2539 + *         with OP_ALG_AAI_HMAC_PRECOMP.
2540   * @ivsize: initialization vector size
2541   * @icvsize: integrity check value (ICV) size (truncated or full)
2542   * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2543   * @nonce: pointer to rfc3686 nonce
2544   * @ctx1_iv_off: IV offset in CONTEXT1 register
2545   * @is_qi: true when called from caam/qi
2546 - *
2547 - * Note: Requires an MDHA split key.
2548 + * @era: SEC Era
2549   */
2550  void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2551                             struct alginfo *adata, unsigned int ivsize,
2552                             unsigned int icvsize, const bool is_rfc3686,
2553 -                           u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2554 +                           u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2555 +                           int era)
2556  {
2557         /* Note: Context registers are saved. */
2558 -       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2559 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2560  
2561         /* Class 2 operation */
2562         append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2563 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2564         }
2565  
2566         /* Read and write assoclen bytes */
2567 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2568 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2569 +       if (is_qi || era < 3) {
2570 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2571 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2572 +       } else {
2573 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2574 +               append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2575 +       }
2576  
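
The source of the associated-data length differs by back-end: caam/qi keeps it in MATH REG3 (it is loaded from the input frame), while Era 3+ job-ring descriptors read it from the DECO Protocol Override register (DPOVRD); Era < 3 falls back to REG3 as well. A one-function sketch of the selection mirrored from the hunks above, register names being labels only:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *assoclen_source(bool is_qi, int era)
    {
            return (is_qi || era < 3) ? "MATH REG3" : "DPOVRD";
    }

    int main(void)
    {
            printf("qi:       %s\n", assoclen_source(true, 8));
            printf("jr era 2: %s\n", assoclen_source(false, 2));
            printf("jr era 8: %s\n", assoclen_source(false, 8));
            return 0;
    }
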
2577         /* Skip assoc data */
2578         append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2579 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2580   * @cdata: pointer to block cipher transform definitions
2581   *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2582   *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2583 - * @adata: pointer to authentication transform definitions. Note that since a
2584 - *         split key is to be used, the size of the split key itself is
2585 - *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2586 - *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2587 + * @adata: pointer to authentication transform definitions.
2588 + *         A split key is required for SEC Era < 6; the size of the split key
2589 + *         is specified in this case. Valid algorithm values - one of
2590 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2591 + *         with OP_ALG_AAI_HMAC_PRECOMP.
2592   * @ivsize: initialization vector size
2593   * @icvsize: integrity check value (ICV) size (truncated or full)
2594   * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2595   * @nonce: pointer to rfc3686 nonce
2596   * @ctx1_iv_off: IV offset in CONTEXT1 register
2597   * @is_qi: true when called from caam/qi
2598 - *
2599 - * Note: Requires an MDHA split key.
2600 + * @era: SEC Era
2601   */
2602  void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2603                             struct alginfo *adata, unsigned int ivsize,
2604                             unsigned int icvsize, const bool geniv,
2605                             const bool is_rfc3686, u32 *nonce,
2606 -                           const u32 ctx1_iv_off, const bool is_qi)
2607 +                           const u32 ctx1_iv_off, const bool is_qi, int era)
2608  {
2609         /* Note: Context registers are saved. */
2610 -       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2611 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2612  
2613         /* Class 2 operation */
2614         append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2615 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2616         }
2617  
2618         /* Read and write assoclen bytes */
2619 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2620 -       if (geniv)
2621 -               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2622 -       else
2623 -               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2624 +       if (is_qi || era < 3) {
2625 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2626 +               if (geniv)
2627 +                       append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2628 +                                               ivsize);
2629 +               else
2630 +                       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2631 +                                       CAAM_CMD_SZ);
2632 +       } else {
2633 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2634 +               if (geniv)
2635 +                       append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2636 +                                               ivsize);
2637 +               else
2638 +                       append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2639 +                                       CAAM_CMD_SZ);
2640 +       }
2641  
2642         /* Skip assoc data */
2643         append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2644 @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2645   * @cdata: pointer to block cipher transform definitions
2646   *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2647   *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2648 - * @adata: pointer to authentication transform definitions. Note that since a
2649 - *         split key is to be used, the size of the split key itself is
2650 - *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2651 - *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2652 + * @adata: pointer to authentication transform definitions.
2653 + *         A split key is required for SEC Era < 6; the size of the split key
2654 + *         is specified in this case. Valid algorithm values - one of
2655 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2656 + *         with OP_ALG_AAI_HMAC_PRECOMP.
2657   * @ivsize: initialization vector size
2658   * @icvsize: integrity check value (ICV) size (truncated or full)
2659   * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2660   * @nonce: pointer to rfc3686 nonce
2661   * @ctx1_iv_off: IV offset in CONTEXT1 register
2662   * @is_qi: true when called from caam/qi
2663 - *
2664 - * Note: Requires an MDHA split key.
2665 + * @era: SEC Era
2666   */
2667  void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2668                                struct alginfo *adata, unsigned int ivsize,
2669                                unsigned int icvsize, const bool is_rfc3686,
2670                                u32 *nonce, const u32 ctx1_iv_off,
2671 -                              const bool is_qi)
2672 +                              const bool is_qi, int era)
2673  {
2674         u32 geniv, moveiv;
2675  
2676         /* Note: Context registers are saved. */
2677 -       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2678 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2679  
2680         if (is_qi) {
2681                 u32 *wait_load_cmd;
2682 @@ -528,8 +561,13 @@ copy_iv:
2683                          OP_ALG_ENCRYPT);
2684  
2685         /* Read and write assoclen bytes */
2686 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2687 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2688 +       if (is_qi || era < 3) {
2689 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2690 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2691 +       } else {
2692 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2693 +               append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2694 +       }
2695  
2696         /* Skip assoc data */
2697         append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2698 @@ -583,14 +621,431 @@ copy_iv:
2699  EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2700  
2701  /**
2702 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2703 + * @desc: pointer to buffer used for descriptor construction
2704 + * @cdata: pointer to block cipher transform definitions
2705 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2706 + *         with OP_ALG_AAI_CBC
2707 + * @adata: pointer to authentication transform definitions.
2708 + *         A split key is required for SEC Era < 6; the size of the split key
2709 + *         is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
2710 + *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2711 + * @assoclen: associated data length
2712 + * @ivsize: initialization vector size
2713 + * @authsize: authentication data size
2714 + * @blocksize: block cipher size
2715 + * @era: SEC Era
2716 + */
2717 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2718 +                          struct alginfo *adata, unsigned int assoclen,
2719 +                          unsigned int ivsize, unsigned int authsize,
2720 +                          unsigned int blocksize, int era)
2721 +{
2722 +       u32 *key_jump_cmd, *zero_payload_jump_cmd;
2723 +       u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2724 +
2725 +       /*
2726 +        * Compute the index (in bytes) for the LOAD with destination of
2727 +        * Class 1 Data Size Register and for the LOAD that generates padding
2728 +        */
2729 +       if (adata->key_inline) {
2730 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2731 +                               cdata->keylen - 4 * CAAM_CMD_SZ;
2732 +               idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2733 +                            cdata->keylen - 2 * CAAM_CMD_SZ;
2734 +       } else {
2735 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2736 +                               4 * CAAM_CMD_SZ;
2737 +               idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2738 +                            2 * CAAM_CMD_SZ;
2739 +       }
2740 +
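
The TLS encap descriptor later patches two of its own LOAD commands (the class 1 data-size LOAD and the pad-generation LOAD), so their byte offsets must be computed up front: inline keys grow the descriptor head by the two key payloads, referenced keys by two pointers. A standalone restatement of that arithmetic, assuming CAAM_CMD_SZ = 4 and 64-bit DMA pointers (CAAM_PTR_SZ = 8), with DESC_TLS10_ENC_LEN taken from the header hunk further below:

    #include <stdbool.h>
    #include <stdio.h>

    #define CAAM_CMD_SZ             4
    #define CAAM_PTR_SZ             8       /* assumed: 64-bit pointers */
    #define DESC_TLS10_ENC_LEN      ((4 + 29) * CAAM_CMD_SZ)

    static void ld_offsets(bool key_inline, unsigned int akeylen_pad,
                           unsigned int ckeylen, unsigned int *datasz,
                           unsigned int *pad)
    {
            unsigned int keys = key_inline ? akeylen_pad + ckeylen
                                           : 2 * CAAM_PTR_SZ;

            *datasz = DESC_TLS10_ENC_LEN + keys - 4 * CAAM_CMD_SZ;
            *pad = DESC_TLS10_ENC_LEN + keys - 2 * CAAM_CMD_SZ;
    }

    int main(void)
    {
            unsigned int d, p;

            ld_offsets(true, 40, 16, &d, &p);   /* inline split + AES keys */
            printf("inline: datasz LOAD at byte %u, pad LOAD at byte %u\n", d, p);
            ld_offsets(false, 40, 16, &d, &p);  /* keys referenced by pointer */
            printf("by-ptr: datasz LOAD at byte %u, pad LOAD at byte %u\n", d, p);
            return 0;
    }
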
2741 +       stidx = 1 << HDR_START_IDX_SHIFT;
2742 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2743 +
2744 +       /* skip key loading if keys are already loaded due to sharing */
2745 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2746 +                                  JUMP_COND_SHRD);
2747 +
2748 +       if (era < 6) {
2749 +               if (adata->key_inline)
2750 +                       append_key_as_imm(desc, adata->key_virt,
2751 +                                         adata->keylen_pad, adata->keylen,
2752 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
2753 +                                         KEY_ENC);
2754 +               else
2755 +                       append_key(desc, adata->key_dma, adata->keylen,
2756 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2757 +       } else {
2758 +               append_proto_dkp(desc, adata);
2759 +       }
2760 +
2761 +       if (cdata->key_inline)
2762 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2763 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2764 +       else
2765 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2766 +                          KEY_DEST_CLASS_REG);
2767 +
2768 +       set_jump_tgt_here(desc, key_jump_cmd);
2769 +
2770 +       /* class 2 operation */
2771 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2772 +                        OP_ALG_ENCRYPT);
2773 +       /* class 1 operation */
2774 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2775 +                        OP_ALG_ENCRYPT);
2776 +
2777 +       /* payloadlen = input data length - (assoclen + ivsize) */
2778 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2779 +
2780 +       /* math1 = payloadlen + icvlen */
2781 +       append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2782 +
2783 +       /* padlen = block_size - (math1 % block_size) */
2784 +       append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2785 +       append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2786 +
2787 +       /* cryptlen = payloadlen + icvlen + padlen */
2788 +       append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2789 +
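
This MATH sequence is exactly the TLS 1.0 CBC record layout: the record grows by the ICV plus 1..blocksize bytes of padding, so even a MAC-aligned payload receives a full block of pad. The same arithmetic in plain C, assuming AES-CBC (16-byte blocks) and HMAC-SHA1 (20-byte ICV):

    #include <stdio.h>

    int main(void)
    {
            unsigned int blocksize = 16, authsize = 20;
            unsigned int payloadlen = 100;                  /* REG0 */
            unsigned int m1 = payloadlen + authsize;        /* REG1 */
            /* REG3 = m1 & (blocksize - 1), REG2 = blocksize - REG3 */
            unsigned int padlen = blocksize - (m1 & (blocksize - 1));
            unsigned int cryptlen = m1 + padlen;            /* VSOL */

            /* padlen is always 1..blocksize: a block-aligned m1 still gets
             * a full block of padding, as TLS requires >= 1 pad byte */
            printf("payload=%u icv=%u pad=%u cryptlen=%u\n",
                   payloadlen, authsize, padlen, cryptlen);
            return 0;
    }
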
2790 +       /*
2791 +        * update immediate data with the padding length value
2792 +        * for the LOAD in the class 1 data size register.
2793 +        */
2794 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2795 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2796 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2797 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2798 +
2799 +       /* overwrite PL field for the padding info FIFO entry */
2800 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2801 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2802 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2803 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2804 +
2805 +       /* store encrypted payload, icv and padding */
2806 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2807 +
2808 +       /* if payload length is zero, jump to zero-payload commands */
2809 +       append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2810 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2811 +                                           JUMP_COND_MATH_Z);
2812 +
2813 +       /* load iv in context1 */
2814 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2815 +                  LDST_CLASS_1_CCB | ivsize);
2816 +
2817 +       /* read assoc for authentication */
2818 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2819 +                            FIFOLD_TYPE_MSG);
2820 +       /* insnoop payload */
2821 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2822 +                            FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2823 +
2824 +       /* jump the zero-payload commands */
2825 +       append_jump(desc, JUMP_TEST_ALL | 3);
2826 +
2827 +       /* zero-payload commands */
2828 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
2829 +
2830 +       /* load iv in context1 */
2831 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2832 +                  LDST_CLASS_1_CCB | ivsize);
2833 +
2834 +       /* assoc data is the only data for authentication */
2835 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2836 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2837 +
2838 +       /* send icv to encryption */
2839 +       append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2840 +                   authsize);
2841 +
2842 +       /* update class 1 data size register with padding length */
2843 +       append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2844 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2845 +
2846 +       /* generate padding and send it to encryption */
2847 +       genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2848 +             NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2849 +       append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2850 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2851 +
2852 +#ifdef DEBUG
2853 +       print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2854 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
2855 +                      desc_bytes(desc), 1);
2856 +#endif
2857 +}
2858 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2859 +
2860 +/**
2861 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2862 + * @desc: pointer to buffer used for descriptor construction
2863 + * @cdata: pointer to block cipher transform definitions
2864 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2865 + *         with OP_ALG_AAI_CBC
2866 + * @adata: pointer to authentication transform definitions.
2867 + *         A split key is required for SEC Era < 6; the size of the split key
2868 + *         is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2869 + *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2870 + * @assoclen: associated data length
2871 + * @ivsize: initialization vector size
2872 + * @authsize: authentication data size
2873 + * @blocksize: block cipher size
2874 + * @era: SEC Era
2875 + */
2876 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2877 +                          struct alginfo *adata, unsigned int assoclen,
2878 +                          unsigned int ivsize, unsigned int authsize,
2879 +                          unsigned int blocksize, int era)
2880 +{
2881 +       u32 stidx, jumpback;
2882 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2883 +       /*
2884 +        * The pointer-size flag (ps) records how wide address pointers are:
2885 +        * false - pointers fit in one 32-bit word,
2886 +        * true - pointers span two 32-bit words.
2887 +        */
2888 +       static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2889 +
2890 +       stidx = 1 << HDR_START_IDX_SHIFT;
2891 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2892 +
2893 +       /* skip key loading if keys are already loaded due to sharing */
2894 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2895 +                                  JUMP_COND_SHRD);
2896 +
2897 +       if (era < 6)
2898 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2899 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
2900 +       else
2901 +               append_proto_dkp(desc, adata);
2902 +
2903 +       append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2904 +                  KEY_DEST_CLASS_REG);
2905 +
2906 +       set_jump_tgt_here(desc, key_jump_cmd);
2907 +
2908 +       /* class 2 operation */
2909 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2910 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2911 +       /* class 1 operation */
2912 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2913 +                        OP_ALG_DECRYPT);
2914 +
2915 +       /* VSIL = input data length - 2 * block_size */
2916 +       append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2917 +                               blocksize);
2918 +
2919 +       /*
2920 +        * payloadlen + icvlen + padlen = input data length - (assoclen +
2921 +        * ivsize)
2922 +        */
2923 +       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2924 +
2925 +       /* skip data to the last but one cipher block */
2926 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2927 +
2928 +       /* load iv for the last cipher block */
2929 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2930 +                  LDST_CLASS_1_CCB | ivsize);
2931 +
2932 +       /* read last cipher block */
2933 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2934 +                            FIFOLD_TYPE_LAST1 | blocksize);
2935 +
2936 +       /* move decrypted block into math0 and math1 */
2937 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2938 +                   blocksize);
2939 +
2940 +       /* reset AES CHA */
2941 +       append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2942 +                           LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2943 +
2944 +       /* rewind input sequence */
2945 +       append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2946 +
2947 +       /* key1 is in decryption form */
2948 +       append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2949 +                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2950 +
2951 +       /* load iv in context1 */
2952 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2953 +                  LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2954 +
2955 +       /* read sequence number */
2956 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2957 +       /* load Type, Version and Len fields in math0 */
2958 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2959 +                  LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2960 +
2961 +       /* compute (padlen - 1) */
2962 +       append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2963 +
2964 +       /* math2 = icvlen + (padlen - 1) + 1 */
2965 +       append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2966 +
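
In TLS-CBC the final plaintext byte carries (padlen - 1), which is why the decrypted last block is parked in MATH0/MATH1 and REG1 is masked with 255; REG2 then sizes the whole ICV-plus-padding trailer. A plain-C restatement with made-up register contents:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int authsize = 20;     /* HMAC-SHA1 ICV, assumed */
            /* pretend MATH1 holds the low half of the decrypted last
             * block; its last byte is the TLS pad byte (padlen - 1) */
            uint64_t math1 = 0x1122334455667703ULL;
            uint64_t pad_minus_1 = math1 & 255;             /* REG1 &= 255 */
            uint64_t trailer = pad_minus_1 + authsize + 1;  /* REG2        */

            printf("padlen=%llu, ICV+padding trailer=%llu bytes\n",
                   (unsigned long long)(pad_minus_1 + 1),
                   (unsigned long long)trailer);
            return 0;
    }
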
2967 +       append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2968 +
2969 +       /* VSOL = payloadlen + icvlen + padlen */
2970 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2971 +
2972 +       if (caam_little_end)
2973 +               append_moveb(desc, MOVE_WAITCOMP |
2974 +                            MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2975 +
2976 +       /* update Len field */
2977 +       append_math_sub(desc, REG0, REG0, REG2, 8);
2978 +
2979 +       /* store decrypted payload, icv and padding */
2980 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2981 +
2982 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
2983 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2984 +
2985 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2986 +                                           JUMP_COND_MATH_Z);
2987 +
2988 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
2989 +       append_move(desc, MOVE_WAITCOMP |
2990 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2991 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
2992 +
2993 +       /* outsnooping payload */
2994 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2995 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2996 +                            FIFOLDST_VLF);
2997 +       skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2998 +
2999 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
3000 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
3001 +       append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
3002 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
3003 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
3004 +
3005 +       set_jump_tgt_here(desc, skip_zero_jump_cmd);
3006 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
3007 +
3008 +       /* load icvlen and padlen */
3009 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
3010 +                            FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
3011 +
3012 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
3013 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
3014 +
3015 +       /*
3016 +        * Start a new input sequence using the SEQ OUT PTR command options,
3017 +        * pointer and length used when the current output sequence was defined.
3018 +        */
3019 +       if (ps) {
3020 +               /*
3021 +                * Move the lower 32 bits of Shared Descriptor address, the
3022 +                * SEQ OUT PTR command, Output Pointer (2 words) and
3023 +                * Output Length into math registers.
3024 +                */
3025 +               if (caam_little_end)
3026 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3027 +                                   MOVE_DEST_MATH0 |
3028 +                                   (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
3029 +               else
3030 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3031 +                                   MOVE_DEST_MATH0 |
3032 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
3033 +
3034 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
3035 +               append_math_and_imm_u32(desc, REG0, REG0, IMM,
3036 +                                       ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
3037 +               /* Append a JUMP command after the copied fields */
3038 +               jumpback = CMD_JUMP | (char)-9;
3039 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3040 +                                   LDST_SRCDST_WORD_DECO_MATH2 |
3041 +                                   (4 << LDST_OFFSET_SHIFT));
3042 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3043 +               /* Move the updated fields back to the Job Descriptor */
3044 +               if (caam_little_end)
3045 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3046 +                                   MOVE_DEST_DESCBUF |
3047 +                                   (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
3048 +               else
3049 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3050 +                                   MOVE_DEST_DESCBUF |
3051 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
3052 +
3053 +               /*
3054 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
3055 +                * and then jump back to the next command from the
3056 +                * Shared Descriptor.
3057 +                */
3058 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
3059 +       } else {
3060 +               /*
3061 +                * Move the SEQ OUT PTR command, Output Pointer (1 word) and
3062 +                * Output Length into math registers.
3063 +                */
3064 +               if (caam_little_end)
3065 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3066 +                                   MOVE_DEST_MATH0 |
3067 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
3068 +               else
3069 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3070 +                                   MOVE_DEST_MATH0 |
3071 +                                   (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
3072 +
3073 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
3074 +               append_math_and_imm_u64(desc, REG0, REG0, IMM,
3075 +                                       ~(((u64)(CMD_SEQ_IN_PTR ^
3076 +                                                CMD_SEQ_OUT_PTR)) << 32));
3077 +               /* Append a JUMP command after the copied fields */
3078 +               jumpback = CMD_JUMP | (char)-7;
3079 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3080 +                                   LDST_SRCDST_WORD_DECO_MATH1 |
3081 +                                   (4 << LDST_OFFSET_SHIFT));
3082 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3083 +               /* Move the updated fields back to the Job Descriptor */
3084 +               if (caam_little_end)
3085 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3086 +                                   MOVE_DEST_DESCBUF |
3087 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
3088 +               else
3089 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3090 +                                   MOVE_DEST_DESCBUF |
3091 +                                   (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
3092 +
3093 +               /*
3094 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
3095 +                * and then jump back to the next command from the
3096 +                * Shared Descriptor.
3097 +                */
3098 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
3099 +       }
3100 +
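
Both branches convert the copied SEQ OUT PTR command into a SEQ IN PTR command by AND-ing with the complement of the XOR of the two opcodes, which clears exactly the bits in which they differ. This works because, in the kernel's desc.h encoding (assumed here as 0x1e/0x1f << 27), the opcodes differ in a single bit that SEQ IN PTR has clear:

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_SHIFT       27
    #define CMD_SEQ_IN_PTR  (0x1eu << CMD_SHIFT)    /* assumed encoding */
    #define CMD_SEQ_OUT_PTR (0x1fu << CMD_SHIFT)    /* assumed encoding */

    int main(void)
    {
            uint32_t cmd = CMD_SEQ_OUT_PTR | 0x1234;    /* opcode + flags/len */

            /* clear the differing bit(s); flags and length are preserved */
            cmd &= ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR);

            printf("rewritten: %#x (SEQ IN PTR? %s)\n", (unsigned int)cmd,
                   (cmd & (0x1fu << CMD_SHIFT)) == CMD_SEQ_IN_PTR ? "yes" : "no");
            return 0;
    }
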
3101 +       /* skip payload */
3102 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
3103 +       /* check icv */
3104 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
3105 +                            FIFOLD_TYPE_LAST2 | authsize);
3106 +
3107 +#ifdef DEBUG
3108 +       print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
3109 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
3110 +                      desc_bytes(desc), 1);
3111 +#endif
3112 +}
3113 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
3114 +
3115 +/**
3116   * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
3117   * @desc: pointer to buffer used for descriptor construction
3118   * @cdata: pointer to block cipher transform definitions
3119   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3120 + * @ivsize: initialization vector size
3121   * @icvsize: integrity check value (ICV) size (truncated or full)
3122 + * @is_qi: true when called from caam/qi
3123   */
3124  void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3125 -                          unsigned int icvsize)
3126 +                          unsigned int ivsize, unsigned int icvsize,
3127 +                          const bool is_qi)
3128  {
3129         u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
3130             *zero_assoc_jump_cmd2;
3131 @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3132         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3133                          OP_ALG_ENCRYPT);
3134  
3135 +       if (is_qi) {
3136 +               u32 *wait_load_cmd;
3137 +
3138 +               /* REG3 = assoclen */
3139 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
3140 +                               LDST_SRCDST_WORD_DECO_MATH3 |
3141 +                               (4 << LDST_OFFSET_SHIFT));
3142 +
3143 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3144 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
3145 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
3146 +                                           JUMP_COND_NIFP);
3147 +               set_jump_tgt_here(desc, wait_load_cmd);
3148 +
3149 +               append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
3150 +                                       ivsize);
3151 +       } else {
3152 +               append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3153 +                               CAAM_CMD_SZ);
3154 +       }
3155 +
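
The QI variant expects the input frame to start with a 4-byte assoclen word (SEQ-loaded into DECO MATH3, with a CALM-guarded jump to wait for it) followed by the IV, then the AAD and payload. A sketch of that layout, inferred from the SEQ LOAD and IV FIFO LOADs above; the packing here (including byte order of assoclen) is illustrative only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t frame[64];
            uint32_t assoclen = 8;          /* byte order is driver-defined */
            uint8_t iv[12] = { 0 };
            size_t off = 0;

            memcpy(frame + off, &assoclen, 4);      /* SEQ LOAD -> DECO MATH3 */
            off += 4;
            memcpy(frame + off, iv, sizeof(iv));    /* FIFO LOAD -> class 1 IV */
            off += sizeof(iv);
            /* assoclen bytes of AAD and then the payload would follow */
            printf("header occupies %zu bytes before the AAD\n", off);
            return 0;
    }
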
3156         /* if assoclen + cryptlen is ZERO, skip to ICV write */
3157 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3158         zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
3159                                                  JUMP_COND_MATH_Z);
3160  
3161 +       if (is_qi)
3162 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3163 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3164 +
3165         /* if assoclen is ZERO, skip reading the assoc data */
3166         append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3167         zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3168 @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3169         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3170                              FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
3171  
3172 -       /* jump the zero-payload commands */
3173 -       append_jump(desc, JUMP_TEST_ALL | 2);
3174 +       /* jump to ICV writing */
3175 +       if (is_qi)
3176 +               append_jump(desc, JUMP_TEST_ALL | 4);
3177 +       else
3178 +               append_jump(desc, JUMP_TEST_ALL | 2);
3179  
3180         /* zero-payload commands */
3181         set_jump_tgt_here(desc, zero_payload_jump_cmd);
3182 @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3183         /* read assoc data */
3184         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3185                              FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
3186 +       if (is_qi)
3187 +               /* jump to ICV writing */
3188 +               append_jump(desc, JUMP_TEST_ALL | 2);
3189  
3190         /* There is no input data */
3191         set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
3192  
3193 +       if (is_qi)
3194 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3195 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
3196 +                                    FIFOLD_TYPE_LAST1);
3197 +
3198         /* write ICV */
3199         append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
3200                          LDST_SRCDST_BYTE_CONTEXT);
3201 @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
3202   * @desc: pointer to buffer used for descriptor construction
3203   * @cdata: pointer to block cipher transform definitions
3204   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3205 + * @ivsize: initialization vector size
3206   * @icvsize: integrity check value (ICV) size (truncated or full)
3207 + * @is_qi: true when called from caam/qi
3208   */
3209  void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3210 -                          unsigned int icvsize)
3211 +                          unsigned int ivsize, unsigned int icvsize,
3212 +                          const bool is_qi)
3213  {
3214         u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
3215  
3216 @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
3217         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3218                          OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3219  
3220 +       if (is_qi) {
3221 +               u32 *wait_load_cmd;
3222 +
3223 +               /* REG3 = assoclen */
3224 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
3225 +                               LDST_SRCDST_WORD_DECO_MATH3 |
3226 +                               (4 << LDST_OFFSET_SHIFT));
3227 +
3228 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3229 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
3230 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
3231 +                                           JUMP_COND_NIFP);
3232 +               set_jump_tgt_here(desc, wait_load_cmd);
3233 +
3234 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3235 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3236 +       }
3237 +
3238         /* if assoclen is ZERO, skip reading the assoc data */
3239         append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3240         zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3241 @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
3242   * @desc: pointer to buffer used for descriptor construction
3243   * @cdata: pointer to block cipher transform definitions
3244   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3245 + * @ivsize: initialization vector size
3246   * @icvsize: integrity check value (ICV) size (truncated or full)
3247 + * @is_qi: true when called from caam/qi
3248   */
3249  void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3250 -                              unsigned int icvsize)
3251 +                              unsigned int ivsize, unsigned int icvsize,
3252 +                              const bool is_qi)
3253  {
3254         u32 *key_jump_cmd;
3255  
3256 @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3257         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3258                          OP_ALG_ENCRYPT);
3259  
3260 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3261 +       if (is_qi) {
3262 +               u32 *wait_load_cmd;
3263 +
3264 +               /* REG3 = assoclen */
3265 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
3266 +                               LDST_SRCDST_WORD_DECO_MATH3 |
3267 +                               (4 << LDST_OFFSET_SHIFT));
3268 +
3269 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3270 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
3271 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
3272 +                                           JUMP_COND_NIFP);
3273 +               set_jump_tgt_here(desc, wait_load_cmd);
3274 +
3275 +               /* Read salt and IV */
3276 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3277 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3278 +                                       FIFOLD_TYPE_IV);
3279 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3280 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3281 +       }
3282 +
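
The two FIFO LOADs above encode the RFC 4106 keymat convention: the 4-byte salt is stored directly behind the AES key (read from cdata->key_virt + cdata->keylen) and is concatenated with the 8-byte explicit IV from the input to form the 12-byte GCM IV. An illustrative packing, with made-up key and IV bytes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* key material as the driver stores it: AES key || salt */
            uint8_t keymat[16 + 4] = { [16] = 0xde, 0xad, 0xbe, 0xef };
            uint8_t seq_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            uint8_t gcm_iv[12];

            memcpy(gcm_iv, keymat + 16, 4);     /* salt = key_virt + keylen */
            memcpy(gcm_iv + 4, seq_iv, 8);      /* explicit IV from the input */

            for (int i = 0; i < 12; i++)
                    printf("%02x", gcm_iv[i]);
            printf("\n");
            return 0;
    }
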
3283 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3284         append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3285  
3286         /* Read assoc data */
3287 @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3288                              FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3289  
3290         /* Skip IV */
3291 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3292 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3293  
3294         /* Will read cryptlen bytes */
3295         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3296 @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
3297   * @desc: pointer to buffer used for descriptor construction
3298   * @cdata: pointer to block cipher transform definitions
3299   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3300 + * @ivsize: initialization vector size
3301   * @icvsize: integrity check value (ICV) size (truncated or full)
3302 + * @is_qi: true when called from caam/qi
3303   */
3304  void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3305 -                              unsigned int icvsize)
3306 +                              unsigned int ivsize, unsigned int icvsize,
3307 +                              const bool is_qi)
3308  {
3309         u32 *key_jump_cmd;
3310  
3311 @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3312         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3313                          OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3314  
3315 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3316 +       if (is_qi) {
3317 +               u32 *wait_load_cmd;
3318 +
3319 +               /* REG3 = assoclen */
3320 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
3321 +                               LDST_SRCDST_WORD_DECO_MATH3 |
3322 +                               (4 << LDST_OFFSET_SHIFT));
3323 +
3324 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3325 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
3326 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
3327 +                                           JUMP_COND_NIFP);
3328 +               set_jump_tgt_here(desc, wait_load_cmd);
3329 +
3330 +               /* Read salt and IV */
3331 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3332 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3333 +                                       FIFOLD_TYPE_IV);
3334 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3335 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3336 +       }
3337 +
3338 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3339         append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3340  
3341         /* Read assoc data */
3342 @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3343                              FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3344  
3345         /* Skip IV */
3346 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3347 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3348  
3349         /* Will read cryptlen bytes */
3350         append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
3351 @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
3352   * @desc: pointer to buffer used for descriptor construction
3353   * @cdata: pointer to block cipher transform definitions
3354   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3355 + * @ivsize: initialization vector size
3356   * @icvsize: integrity check value (ICV) size (truncated or full)
3357 + * @is_qi: true when called from caam/qi
3358   */
3359  void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3360 -                              unsigned int icvsize)
3361 +                              unsigned int ivsize, unsigned int icvsize,
3362 +                              const bool is_qi)
3363  {
3364         u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3365  
3366 @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3367         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3368                          OP_ALG_ENCRYPT);
3369  
3370 +       if (is_qi) {
3371 +               /* assoclen is not needed, skip it */
3372 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3373 +
3374 +               /* Read salt and IV */
3375 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3376 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3377 +                                       FIFOLD_TYPE_IV);
3378 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3379 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3380 +       }
3381 +
3382         /* assoclen + cryptlen = seqinlen */
3383         append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
3384  
3385 @@ -931,7 +1507,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3386         read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3387                                     (0x6 << MOVE_LEN_SHIFT));
3388         write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3389 -                                    (0x8 << MOVE_LEN_SHIFT));
3390 +                                    (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3391  
3392         /* Will read assoclen + cryptlen bytes */
3393         append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3394 @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
3395   * @desc: pointer to buffer used for descriptor construction
3396   * @cdata: pointer to block cipher transform definitions
3397   *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3398 + * @ivsize: initialization vector size
3399   * @icvsize: integrity check value (ICV) size (truncated or full)
3400 + * @is_qi: true when called from caam/qi
3401   */
3402  void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3403 -                              unsigned int icvsize)
3404 +                              unsigned int ivsize, unsigned int icvsize,
3405 +                              const bool is_qi)
3406  {
3407         u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3408  
3409 @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3410         append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3411                          OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3412  
3413 +       if (is_qi) {
3414 +               /* assoclen is not needed, skip it */
3415 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3416 +
3417 +               /* Read salt and IV */
3418 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3419 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3420 +                                       FIFOLD_TYPE_IV);
3421 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3422 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3423 +       }
3424 +
3425         /* assoclen + cryptlen = seqoutlen */
3426         append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3427  
3428 @@ -1001,7 +1592,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3429         read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3430                                     (0x6 << MOVE_LEN_SHIFT));
3431         write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3432 -                                    (0x8 << MOVE_LEN_SHIFT));
3433 +                                    (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3434  
3435         /* Will read assoclen + cryptlen bytes */
3436         append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3437 @@ -1035,6 +1626,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3438  }
3439  EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
3440  
3441 +/**
3442 + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
3443 + *                          IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
3444 + *                          descriptor (non-protocol).
3445 + * @desc: pointer to buffer used for descriptor construction
3446 + * @cdata: pointer to block cipher transform definitions
3447 + *         Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
3448 + *         OP_ALG_AAI_AEAD.
3449 + * @adata: pointer to authentication transform definitions
3450 + *         Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
3451 + *         OP_ALG_AAI_AEAD.
3452 + * @ivsize: initialization vector size
3453 + * @icvsize: integrity check value (ICV) size (truncated or full)
3454 + * @encap: true if encapsulation, false if decapsulation
3455 + * @is_qi: true when called from caam/qi
3456 + */
3457 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3458 +                           struct alginfo *adata, unsigned int ivsize,
3459 +                           unsigned int icvsize, const bool encap,
3460 +                           const bool is_qi)
3461 +{
3462 +       u32 *key_jump_cmd, *wait_cmd;
3463 +       u32 nfifo;
3464 +       const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
3465 +
3466 +       /* Note: Context registers are saved. */
3467 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
3468 +
3469 +       /* skip key loading if keys are already loaded due to sharing */
3470 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3471 +                                  JUMP_COND_SHRD);
3472 +
3473 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
3474 +                         CLASS_1 | KEY_DEST_CLASS_REG);
3475 +
3476 +       /* For IPsec load the salt from keymat in the context register */
3477 +       if (is_ipsec)
3478 +               append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
3479 +                                  LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
3480 +                                  4 << LDST_OFFSET_SHIFT);
3481 +
3482 +       set_jump_tgt_here(desc, key_jump_cmd);
3483 +
3484 +       /* Class 2 and 1 operations: Poly & ChaCha */
3485 +       if (encap) {
3486 +               append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3487 +                                OP_ALG_ENCRYPT);
3488 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3489 +                                OP_ALG_ENCRYPT);
3490 +       } else {
3491 +               append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3492 +                                OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3493 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3494 +                                OP_ALG_DECRYPT);
3495 +       }
3496 +
3497 +       if (is_qi) {
3498 +               u32 *wait_load_cmd;
3499 +               u32 ctx1_iv_off = is_ipsec ? 8 : 4;
3500 +
3501 +               /* REG3 = assoclen */
3502 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
3503 +                               LDST_SRCDST_WORD_DECO_MATH3 |
3504 +                               4 << LDST_OFFSET_SHIFT);
3505 +
3506 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3507 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
3508 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
3509 +                                           JUMP_COND_NIFP);
3510 +               set_jump_tgt_here(desc, wait_load_cmd);
3511 +
3512 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
3513 +                               LDST_SRCDST_BYTE_CONTEXT |
3514 +                               ctx1_iv_off << LDST_OFFSET_SHIFT);
3515 +       }
3516 +
3517 +       /*
3518 +        * NFIFO trick: read the associated data from the input and send
3519 +        * it to both the class 1 and class 2 alignment blocks. From
3520 +        * class 1, pass the data on to the output FIFO and write it to
3521 +        * memory, since the AD does not need to be encrypted.
3522 +        */
3523 +       nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
3524 +               NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
3525 +       append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
3526 +                           LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
3527 +
3528 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3529 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3530 +       append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
3531 +                            FIFOLD_CLASS_CLASS1 | LDST_VLF);
3532 +       append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
3533 +                       MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
3534 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
3535 +
3536 +       /* IPsec - copy IV at the output */
3537 +       if (is_ipsec)
3538 +               append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
3539 +                                     0x2 << 25);
3540 +
3541 +       wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
3542 +                              JUMP_COND_NOP | JUMP_TEST_ALL);
3543 +       set_jump_tgt_here(desc, wait_cmd);
3544 +
3545 +       if (encap) {
3546 +               /* Read and write cryptlen bytes */
3547 +               append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3548 +               append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3549 +                               CAAM_CMD_SZ);
3550 +               aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
3551 +
3552 +               /* Write ICV */
3553 +               append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
3554 +                                LDST_SRCDST_BYTE_CONTEXT);
3555 +       } else {
3556 +               /* Read and write cryptlen bytes */
3557 +               append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3558 +               append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
3559 +                               CAAM_CMD_SZ);
3560 +               aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3561 +
3562 +               /* Load ICV for verification */
3563 +               append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
3564 +                                    FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3565 +       }
3566 +
3567 +       print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
3568 +                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3569 +                            1);
3570 +}
3571 +EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
3572 +
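
cnstr_shdsc_chachapoly() discriminates the two templates purely by IV size: 12 bytes means the generic rfc7539 AEAD, anything else (8 bytes for rfc7539esp) is treated as IPsec, where the 4-byte salt from the keymat fills context bytes 4..7 and the packet IV is loaded at offset 8. A sketch of that decision, with CHACHAPOLY_IV_SIZE = 12 assumed from the kernel's chacha20 header:

    #include <stdbool.h>
    #include <stdio.h>

    #define CHACHAPOLY_IV_SIZE 12   /* assumed, from include/crypto/chacha20.h */

    int main(void)
    {
            unsigned int sizes[] = { 8, 12 };

            for (int i = 0; i < 2; i++) {
                    unsigned int ivsize = sizes[i];
                    bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
                    unsigned int ctx1_iv_off = is_ipsec ? 8 : 4;

                    printf("ivsize=%2u -> %s, IV at context1 offset %u\n",
                           ivsize,
                           is_ipsec ? "rfc7539esp (IPsec)" : "rfc7539",
                           ctx1_iv_off);
            }
            return 0;
    }
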
3573  /*
3574   * For ablkcipher encrypt and decrypt, read from req->src and
3575   * write to req->dst
3576 @@ -1053,7 +1776,8 @@ static inline void ablkcipher_append_src
3577   * @desc: pointer to buffer used for descriptor construction
3578   * @cdata: pointer to block cipher transform definitions
3579   *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3580 - *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3581 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3582 + *                                - OP_ALG_ALGSEL_CHACHA20
3583   * @ivsize: initialization vector size
3584   * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3585   * @ctx1_iv_off: IV offset in CONTEXT1 register
3586 @@ -1075,7 +1799,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
3587  
3588         /* Load nonce into CONTEXT1 reg */
3589         if (is_rfc3686) {
3590 -               u8 *nonce = cdata->key_virt + cdata->keylen;
3591 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
3592  
3593                 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3594                                    LDST_CLASS_IND_CCB |
3595 @@ -1118,7 +1842,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
3596   * @desc: pointer to buffer used for descriptor construction
3597   * @cdata: pointer to block cipher transform definitions
3598   *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3599 - *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3600 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3601 + *                                - OP_ALG_ALGSEL_CHACHA20
3602   * @ivsize: initialization vector size
3603   * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3604   * @ctx1_iv_off: IV offset in CONTEXT1 register
3605 @@ -1140,7 +1865,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
3606  
3607         /* Load nonce into CONTEXT1 reg */
3608         if (is_rfc3686) {
3609 -               u8 *nonce = cdata->key_virt + cdata->keylen;
3610 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
3611  
3612                 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3613                                    LDST_CLASS_IND_CCB |
3614 @@ -1209,7 +1934,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
3615  
3616         /* Load Nonce into CONTEXT1 reg */
3617         if (is_rfc3686) {
3618 -               u8 *nonce = cdata->key_virt + cdata->keylen;
3619 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
3620  
3621                 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3622                                    LDST_CLASS_IND_CCB |
3623 --- a/drivers/crypto/caam/caamalg_desc.h
3624 +++ b/drivers/crypto/caam/caamalg_desc.h
3625 @@ -17,6 +17,9 @@
3626  #define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
3627  #define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
3628  
3629 +#define DESC_TLS_BASE                  (4 * CAAM_CMD_SZ)
3630 +#define DESC_TLS10_ENC_LEN             (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
3631 +
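
These lengths feed the inline-vs-DMA key decision: a key pair goes inline only if the base descriptor plus both key payloads still fits in the shared-descriptor budget. A sketch of that check, assuming the usual CAAM limit of 64 command words (256 bytes) per descriptor; the real driver makes this call before invoking cnstr_shdsc_tls_encap():

    #include <stdbool.h>
    #include <stdio.h>

    #define CAAM_CMD_SZ             4
    #define MAX_DESC_BYTES          (64 * CAAM_CMD_SZ)     /* assumed limit */
    #define DESC_TLS10_ENC_LEN      ((4 + 29) * CAAM_CMD_SZ)

    int main(void)
    {
            unsigned int split_keylen_pad = 40;     /* e.g. padded HMAC-SHA1 */
            unsigned int enckeylen = 16;            /* AES-128 */
            bool inline_ok = DESC_TLS10_ENC_LEN + split_keylen_pad + enckeylen
                             <= MAX_DESC_BYTES;

            printf("base %u bytes + keys %u bytes -> %s\n",
                   DESC_TLS10_ENC_LEN, split_keylen_pad + enckeylen,
                   inline_ok ? "inline keys" : "reference keys via DMA");
            return 0;
    }
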
3632  /* Note: Nonce is counted in cdata.keylen */
3633  #define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
3634  
3635 @@ -27,14 +30,20 @@
3636  #define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
3637  #define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
3638  #define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
3639 +#define DESC_QI_GCM_ENC_LEN            (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
3640 +#define DESC_QI_GCM_DEC_LEN            (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
3641  
3642  #define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
3643  #define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3644  #define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3645 +#define DESC_QI_RFC4106_ENC_LEN                (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
3646 +#define DESC_QI_RFC4106_DEC_LEN                (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
3647  
3648  #define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
3649  #define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
3650  #define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
3651 +#define DESC_QI_RFC4543_ENC_LEN                (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
3652 +#define DESC_QI_RFC4543_DEC_LEN                (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
3653  
3654  #define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
3655  #define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
3656 @@ -43,46 +52,67 @@
3657                                          15 * CAAM_CMD_SZ)
3658  
3659  void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
3660 -                                unsigned int icvsize);
3661 +                                unsigned int icvsize, int era);
3662  
3663  void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
3664 -                                unsigned int icvsize);
3665 +                                unsigned int icvsize, int era);
3666  
3667  void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
3668                             struct alginfo *adata, unsigned int ivsize,
3669                             unsigned int icvsize, const bool is_rfc3686,
3670                             u32 *nonce, const u32 ctx1_iv_off,
3671 -                           const bool is_qi);
3672 +                           const bool is_qi, int era);
3673  
3674  void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
3675                             struct alginfo *adata, unsigned int ivsize,
3676                             unsigned int icvsize, const bool geniv,
3677                             const bool is_rfc3686, u32 *nonce,
3678 -                           const u32 ctx1_iv_off, const bool is_qi);
3679 +                           const u32 ctx1_iv_off, const bool is_qi, int era);
3680  
3681  void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3682                                struct alginfo *adata, unsigned int ivsize,
3683                                unsigned int icvsize, const bool is_rfc3686,
3684                                u32 *nonce, const u32 ctx1_iv_off,
3685 -                              const bool is_qi);
3686 +                              const bool is_qi, int era);
3687 +
3688 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
3689 +                          struct alginfo *adata, unsigned int assoclen,
3690 +                          unsigned int ivsize, unsigned int authsize,
3691 +                          unsigned int blocksize, int era);
3692 +
3693 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
3694 +                          struct alginfo *adata, unsigned int assoclen,
3695 +                          unsigned int ivsize, unsigned int authsize,
3696 +                          unsigned int blocksize, int era);
3697  
3698  void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3699 -                          unsigned int icvsize);
3700 +                          unsigned int ivsize, unsigned int icvsize,
3701 +                          const bool is_qi);
3702  
3703  void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3704 -                          unsigned int icvsize);
3705 +                          unsigned int ivsize, unsigned int icvsize,
3706 +                          const bool is_qi);
3707  
3708  void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3709 -                              unsigned int icvsize);
3710 +                              unsigned int ivsize, unsigned int icvsize,
3711 +                              const bool is_qi);
3712  
3713  void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3714 -                              unsigned int icvsize);
3715 +                              unsigned int ivsize, unsigned int icvsize,
3716 +                              const bool is_qi);
3717  
3718  void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3719 -                              unsigned int icvsize);
3720 +                              unsigned int ivsize, unsigned int icvsize,
3721 +                              const bool is_qi);
3722  
3723  void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3724 -                              unsigned int icvsize);
3725 +                              unsigned int ivsize, unsigned int icvsize,
3726 +                              const bool is_qi);
3727 +
3728 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3729 +                           struct alginfo *adata, unsigned int ivsize,
3730 +                           unsigned int icvsize, const bool encap,
3731 +                           const bool is_qi);
3732  
3733  void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3734                                   unsigned int ivsize, const bool is_rfc3686,
3735 --- a/drivers/crypto/caam/caamalg_qi.c
3736 +++ b/drivers/crypto/caam/caamalg_qi.c
3737 @@ -7,7 +7,7 @@
3738   */
3739  
3740  #include "compat.h"
3741 -
3742 +#include "ctrl.h"
3743  #include "regs.h"
3744  #include "intern.h"
3745  #include "desc_constr.h"
3746 @@ -53,6 +53,7 @@ struct caam_ctx {
3747         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3748         u8 key[CAAM_MAX_KEY_SIZE];
3749         dma_addr_t key_dma;
3750 +       enum dma_data_direction dir;
3751         struct alginfo adata;
3752         struct alginfo cdata;
3753         unsigned int authsize;
3754 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3755         const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3756                                OP_ALG_AAI_CTR_MOD128);
3757         const bool is_rfc3686 = alg->caam.rfc3686;
3758 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3759  
3760         if (!ctx->cdata.keylen || !ctx->authsize)
3761                 return 0;
3762 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3763  
3764         cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3765                                ivsize, ctx->authsize, is_rfc3686, nonce,
3766 -                              ctx1_iv_off, true);
3767 +                              ctx1_iv_off, true, ctrlpriv->era);
3768  
3769  skip_enc:
3770         /* aead_decrypt shared descriptor */
3771 @@ -149,7 +151,8 @@ skip_enc:
3772  
3773         cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3774                                ivsize, ctx->authsize, alg->caam.geniv,
3775 -                              is_rfc3686, nonce, ctx1_iv_off, true);
3776 +                              is_rfc3686, nonce, ctx1_iv_off, true,
3777 +                              ctrlpriv->era);
3778  
3779         if (!alg->caam.geniv)
3780                 goto skip_givenc;
3781 @@ -176,7 +179,7 @@ skip_enc:
3782  
3783         cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3784                                   ivsize, ctx->authsize, is_rfc3686, nonce,
3785 -                                 ctx1_iv_off, true);
3786 +                                 ctx1_iv_off, true, ctrlpriv->era);
3787  
3788  skip_givenc:
3789         return 0;
3790 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3791  {
3792         struct caam_ctx *ctx = crypto_aead_ctx(aead);
3793         struct device *jrdev = ctx->jrdev;
3794 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3795         struct crypto_authenc_keys keys;
3796         int ret = 0;
3797  
3798 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3799                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3800  #endif
3801  
3802 +       /*
3803 +        * If DKP is supported, use it in the shared descriptor to generate
3804 +        * the split key.
3805 +        */
3806 +       if (ctrlpriv->era >= 6) {
3807 +               ctx->adata.keylen = keys.authkeylen;
3808 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3809 +                                                     OP_ALG_ALGSEL_MASK);
3810 +
3811 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3812 +                       goto badkey;
3813 +
3814 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
3815 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3816 +                      keys.enckeylen);
3817 +               dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3818 +                                          ctx->adata.keylen_pad +
3819 +                                          keys.enckeylen, ctx->dir);
3820 +               goto skip_split_key;
3821 +       }
3822 +
3823         ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3824                             keys.authkeylen, CAAM_MAX_KEY_SIZE -
3825                             keys.enckeylen);
3826 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3827         /* postpend encryption key to auth split key */
3828         memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3829         dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3830 -                                  keys.enckeylen, DMA_TO_DEVICE);
3831 +                                  keys.enckeylen, ctx->dir);
3832  #ifdef DEBUG
3833         print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3834                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3835                        ctx->adata.keylen_pad + keys.enckeylen, 1);
3836  #endif
3837  
3838 +skip_split_key:
3839         ctx->cdata.keylen = keys.enckeylen;
3840  
3841         ret = aead_set_sh_desc(aead);
3842 @@ -258,6 +284,468 @@ badkey:
3843         return -EINVAL;
3844  }
3845  
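Both aead_setkey() paths above leave ctx->key with the same layout — authentication key material padded to adata.keylen_pad, followed by the raw encryption key — the only difference being that on era >= 6 the split key is derived later, by the DKP instruction running inside the shared descriptor. A sketch of that layout, using the field names from this patch:

	/*
	 * ctx->key, as synced to the device (keylen_pad + enckeylen bytes):
	 *
	 *   0                 adata.keylen_pad                    end
	 *   +-----------------+-----------------------------------+
	 *   | auth key        | encryption key (raw)              |
	 *   | (raw on era>=6, |                                   |
	 *   |  split before)  |                                   |
	 *   +-----------------+-----------------------------------+
	 */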
3846 +static int tls_set_sh_desc(struct crypto_aead *tls)
3847 +{
3848 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
3849 +       unsigned int ivsize = crypto_aead_ivsize(tls);
3850 +       unsigned int blocksize = crypto_aead_blocksize(tls);
3851 +       unsigned int assoclen = 13; /* TLS: seq(8) + type(1) + ver(2) + len(2) */
3852 +       unsigned int data_len[2];
3853 +       u32 inl_mask;
3854 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3855 +
3856 +       if (!ctx->cdata.keylen || !ctx->authsize)
3857 +               return 0;
3858 +
3859 +       /*
3860 +        * TLS 1.0 encrypt shared descriptor
3861 +        * Job Descriptor and Shared Descriptor
3862 +        * must fit into the 64-word Descriptor h/w Buffer
3863 +        */
3864 +       data_len[0] = ctx->adata.keylen_pad;
3865 +       data_len[1] = ctx->cdata.keylen;
3866 +
3867 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3868 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
3869 +               return -EINVAL;
3870 +
3871 +       if (inl_mask & 1)
3872 +               ctx->adata.key_virt = ctx->key;
3873 +       else
3874 +               ctx->adata.key_dma = ctx->key_dma;
3875 +
3876 +       if (inl_mask & 2)
3877 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3878 +       else
3879 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3880 +
3881 +       ctx->adata.key_inline = !!(inl_mask & 1);
3882 +       ctx->cdata.key_inline = !!(inl_mask & 2);
3883 +
3884 +       cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3885 +                             assoclen, ivsize, ctx->authsize, blocksize,
3886 +                             ctrlpriv->era);
3887 +
3888 +       /*
3889 +        * TLS 1.0 decrypt shared descriptor
3890 +        * Keys do not fit inline, regardless of algorithms used
3891 +        */
3892 +       ctx->adata.key_inline = false;
3893 +       ctx->adata.key_dma = ctx->key_dma;
3894 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3895 +
3896 +       cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3897 +                             assoclen, ivsize, ctx->authsize, blocksize,
3898 +                             ctrlpriv->era);
3899 +
3900 +       return 0;
3901 +}
3902 +
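The inl_mask computed by tls_set_sh_desc() above carries one bit per entry of data_len[], in the order the lengths were passed: bit 0 is the padded authentication key, bit 1 the cipher key. A minimal sketch with illustrative lengths — DESC_TLS10_ENC_LEN is (4 + 29) * CAAM_CMD_SZ = 132 bytes, i.e. 33 of the 64 descriptor words:

	u32 inl_mask;
	unsigned int data_len[2] = { 64 /* keylen_pad, e.g. SHA-1 */,
				     16 /* AES-128 key */ };

	/* bit i of inl_mask is set iff data_len[i] still fits inline */
	if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	/* inl_mask & 1 -> inline auth key; inl_mask & 2 -> inline enc key;
	 * a cleared bit means the key is referenced by DMA address instead */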
3903 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3904 +{
3905 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
3906 +
3907 +       ctx->authsize = authsize;
3908 +       tls_set_sh_desc(tls);
3909 +
3910 +       return 0;
3911 +}
3912 +
3913 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3914 +                     unsigned int keylen)
3915 +{
3916 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
3917 +       struct device *jrdev = ctx->jrdev;
3918 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3919 +       struct crypto_authenc_keys keys;
3920 +       int ret = 0;
3921 +
3922 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3923 +               goto badkey;
3924 +
3925 +#ifdef DEBUG
3926 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3927 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
3928 +               keys.authkeylen);
3929 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3930 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3931 +#endif
3932 +
3933 +       /*
3934 +        * If DKP is supported, use it in the shared descriptor to generate
3935 +        * the split key.
3936 +        */
3937 +       if (ctrlpriv->era >= 6) {
3938 +               ctx->adata.keylen = keys.authkeylen;
3939 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3940 +                                                     OP_ALG_ALGSEL_MASK);
3941 +
3942 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3943 +                       goto badkey;
3944 +
3945 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
3946 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3947 +                      keys.enckeylen);
3948 +               dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3949 +                                          ctx->adata.keylen_pad +
3950 +                                          keys.enckeylen, ctx->dir);
3951 +               goto skip_split_key;
3952 +       }
3953 +
3954 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3955 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
3956 +                           keys.enckeylen);
3957 +       if (ret)
3958 +               goto badkey;
3959 +
3960 +       /* append encryption key to auth split key */
3961 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3962 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3963 +                                  keys.enckeylen, ctx->dir);
3964 +
3965 +#ifdef DEBUG
3966 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
3967 +               ctx->adata.keylen, ctx->adata.keylen_pad);
3968 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3969 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3970 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
3971 +#endif
3972 +
3973 +skip_split_key:
3974 +       ctx->cdata.keylen = keys.enckeylen;
3975 +
3976 +       ret = tls_set_sh_desc(tls);
3977 +       if (ret)
3978 +               goto badkey;
3979 +
3980 +       /* Now update the driver contexts with the new shared descriptor */
3981 +       if (ctx->drv_ctx[ENCRYPT]) {
3982 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3983 +                                         ctx->sh_desc_enc);
3984 +               if (ret) {
3985 +                       dev_err(jrdev, "driver enc context update failed\n");
3986 +                       goto badkey;
3987 +               }
3988 +       }
3989 +
3990 +       if (ctx->drv_ctx[DECRYPT]) {
3991 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3992 +                                         ctx->sh_desc_dec);
3993 +               if (ret) {
3994 +                       dev_err(jrdev, "driver dec context update failed\n");
3995 +                       goto badkey;
3996 +               }
3997 +       }
3998 +
3999 +       return ret;
4000 +badkey:
4001 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
4002 +       return -EINVAL;
4003 +}
4004 +
4005 +static int gcm_set_sh_desc(struct crypto_aead *aead)
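tls_setkey() reuses crypto_authenc_extractkeys(), so callers hand it the same rtattr-encoded blob as authenc(): an RTA header carrying the cipher key length, then the raw auth key, then the raw cipher key. A hedged sketch of building such a blob (pack_authenc_key() is a hypothetical helper, not part of this patch):

	#include <crypto/authenc.h>
	#include <linux/rtnetlink.h>
	#include <linux/string.h>

	static int pack_authenc_key(u8 *blob, const u8 *authkey,
				    unsigned int alen, const u8 *enckey,
				    unsigned int elen)
	{
		struct rtattr *rta = (struct rtattr *)blob;
		struct crypto_authenc_key_param *param;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(elen);

		memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, alen);
		memcpy(blob + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);

		return RTA_SPACE(sizeof(*param)) + alen + elen; /* keylen */
	}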
4006 +{
4007 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4008 +       unsigned int ivsize = crypto_aead_ivsize(aead);
4009 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4010 +                       ctx->cdata.keylen;
4011 +
4012 +       if (!ctx->cdata.keylen || !ctx->authsize)
4013 +               return 0;
4014 +
4015 +       /*
4016 +        * Job Descriptor and Shared Descriptor
4017 +        * must fit into the 64-word Descriptor h/w Buffer
4018 +        */
4019 +       if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
4020 +               ctx->cdata.key_inline = true;
4021 +               ctx->cdata.key_virt = ctx->key;
4022 +       } else {
4023 +               ctx->cdata.key_inline = false;
4024 +               ctx->cdata.key_dma = ctx->key_dma;
4025 +       }
4026 +
4027 +       cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4028 +                             ctx->authsize, true);
4029 +
4030 +       /*
4031 +        * Job Descriptor and Shared Descriptor
4032 +        * must fit into the 64-word Descriptor h/w Buffer
4033 +        */
4034 +       if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
4035 +               ctx->cdata.key_inline = true;
4036 +               ctx->cdata.key_virt = ctx->key;
4037 +       } else {
4038 +               ctx->cdata.key_inline = false;
4039 +               ctx->cdata.key_dma = ctx->key_dma;
4040 +       }
4041 +
4042 +       cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4043 +                             ctx->authsize, true);
4044 +
4045 +       return 0;
4046 +}
4047 +
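The rem_bytes test above is plain byte accounting. A worked example, assuming 64-bit DMA addresses (so DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ = 44 bytes) and the 64-word descriptor buffer (CAAM_DESC_BYTES_MAX = 256 bytes):

	/*
	 * AES-256 GCM key (32 bytes):
	 *   rem_bytes           = 256 - 44 - 32 = 180
	 *   DESC_QI_GCM_ENC_LEN = (3 + 16 + 6) * CAAM_CMD_SZ = 100 bytes
	 * 180 >= 100, so the key is inlined in the shared descriptor;
	 * otherwise only its DMA address would be embedded.
	 */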
4048 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
4049 +{
4050 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4051 +
4052 +       ctx->authsize = authsize;
4053 +       gcm_set_sh_desc(authenc);
4054 +
4055 +       return 0;
4056 +}
4057 +
4058 +static int gcm_setkey(struct crypto_aead *aead,
4059 +                     const u8 *key, unsigned int keylen)
4060 +{
4061 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4062 +       struct device *jrdev = ctx->jrdev;
4063 +       int ret;
4064 +
4065 +#ifdef DEBUG
4066 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4067 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4068 +#endif
4069 +
4070 +       memcpy(ctx->key, key, keylen);
4071 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
4072 +       ctx->cdata.keylen = keylen;
4073 +
4074 +       ret = gcm_set_sh_desc(aead);
4075 +       if (ret)
4076 +               return ret;
4077 +
4078 +       /* Now update the driver contexts with the new shared descriptor */
4079 +       if (ctx->drv_ctx[ENCRYPT]) {
4080 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4081 +                                         ctx->sh_desc_enc);
4082 +               if (ret) {
4083 +                       dev_err(jrdev, "driver enc context update failed\n");
4084 +                       return ret;
4085 +               }
4086 +       }
4087 +
4088 +       if (ctx->drv_ctx[DECRYPT]) {
4089 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4090 +                                         ctx->sh_desc_dec);
4091 +               if (ret) {
4092 +                       dev_err(jrdev, "driver dec context update failed\n");
4093 +                       return ret;
4094 +               }
4095 +       }
4096 +
4097 +       return 0;
4098 +}
4099 +
4100 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
4101 +{
4102 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4103 +       unsigned int ivsize = crypto_aead_ivsize(aead);
4104 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4105 +                       ctx->cdata.keylen;
4106 +
4107 +       if (!ctx->cdata.keylen || !ctx->authsize)
4108 +               return 0;
4109 +
4110 +       ctx->cdata.key_virt = ctx->key;
4111 +
4112 +       /*
4113 +        * Job Descriptor and Shared Descriptor
4114 +        * must fit into the 64-word Descriptor h/w Buffer
4115 +        */
4116 +       if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
4117 +               ctx->cdata.key_inline = true;
4118 +       } else {
4119 +               ctx->cdata.key_inline = false;
4120 +               ctx->cdata.key_dma = ctx->key_dma;
4121 +       }
4122 +
4123 +       cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4124 +                                 ctx->authsize, true);
4125 +
4126 +       /*
4127 +        * Job Descriptor and Shared Descriptor
4128 +        * must fit into the 64-word Descriptor h/w Buffer
4129 +        */
4130 +       if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
4131 +               ctx->cdata.key_inline = true;
4132 +       } else {
4133 +               ctx->cdata.key_inline = false;
4134 +               ctx->cdata.key_dma = ctx->key_dma;
4135 +       }
4136 +
4137 +       cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4138 +                                 ctx->authsize, true);
4139 +
4140 +       return 0;
4141 +}
4142 +
4143 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
4144 +                              unsigned int authsize)
4145 +{
4146 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4147 +
4148 +       ctx->authsize = authsize;
4149 +       rfc4106_set_sh_desc(authenc);
4150 +
4151 +       return 0;
4152 +}
4153 +
4154 +static int rfc4106_setkey(struct crypto_aead *aead,
4155 +                         const u8 *key, unsigned int keylen)
4156 +{
4157 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4158 +       struct device *jrdev = ctx->jrdev;
4159 +       int ret;
4160 +
4161 +       if (keylen < 4)
4162 +               return -EINVAL;
4163 +
4164 +#ifdef DEBUG
4165 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4166 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4167 +#endif
4168 +
4169 +       memcpy(ctx->key, key, keylen);
4170 +       /*
4171 +        * The last four bytes of the key material are used as the salt value
4172 +        * in the nonce. Update the AES key length.
4173 +        */
4174 +       ctx->cdata.keylen = keylen - 4;
4175 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4176 +                                  ctx->dir);
4177 +
4178 +       ret = rfc4106_set_sh_desc(aead);
4179 +       if (ret)
4180 +               return ret;
4181 +
4182 +       /* Now update the driver contexts with the new shared descriptor */
4183 +       if (ctx->drv_ctx[ENCRYPT]) {
4184 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4185 +                                         ctx->sh_desc_enc);
4186 +               if (ret) {
4187 +                       dev_err(jrdev, "driver enc context update failed\n");
4188 +                       return ret;
4189 +               }
4190 +       }
4191 +
4192 +       if (ctx->drv_ctx[DECRYPT]) {
4193 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4194 +                                         ctx->sh_desc_dec);
4195 +               if (ret) {
4196 +                       dev_err(jrdev, "driver dec context update failed\n");
4197 +                       return ret;
4198 +               }
4199 +       }
4200 +
4201 +       return 0;
4202 +}
4203 +
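Per RFC 4106 the GCM nonce is the 4-byte salt carved off the end of the key material above, concatenated with the 8-byte explicit IV carried in each request. A sketch of the resulting layouts:

	/*
	 * setkey() input:  [ AES key (keylen - 4) | salt (4) ]
	 * req->iv:         [ explicit IV (8) ]
	 * 96-bit nonce:    [ salt (4) | explicit IV (8) ]
	 */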
4204 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
4205 +{
4206 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4207 +       unsigned int ivsize = crypto_aead_ivsize(aead);
4208 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4209 +                       ctx->cdata.keylen;
4210 +
4211 +       if (!ctx->cdata.keylen || !ctx->authsize)
4212 +               return 0;
4213 +
4214 +       ctx->cdata.key_virt = ctx->key;
4215 +
4216 +       /*
4217 +        * Job Descriptor and Shared Descriptor
4218 +        * must fit into the 64-word Descriptor h/w Buffer
4219 +        */
4220 +       if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
4221 +               ctx->cdata.key_inline = true;
4222 +       } else {
4223 +               ctx->cdata.key_inline = false;
4224 +               ctx->cdata.key_dma = ctx->key_dma;
4225 +       }
4226 +
4227 +       cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4228 +                                 ctx->authsize, true);
4229 +
4230 +       /*
4231 +        * Job Descriptor and Shared Descriptor
4232 +        * must fit into the 64-word Descriptor h/w Buffer
4233 +        */
4234 +       if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
4235 +               ctx->cdata.key_inline = true;
4236 +       } else {
4237 +               ctx->cdata.key_inline = false;
4238 +               ctx->cdata.key_dma = ctx->key_dma;
4239 +       }
4240 +
4241 +       cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4242 +                                 ctx->authsize, true);
4243 +
4244 +       return 0;
4245 +}
4246 +
4247 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
4248 +                              unsigned int authsize)
4249 +{
4250 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4251 +
4252 +       ctx->authsize = authsize;
4253 +       rfc4543_set_sh_desc(authenc);
4254 +
4255 +       return 0;
4256 +}
4257 +
4258 +static int rfc4543_setkey(struct crypto_aead *aead,
4259 +                         const u8 *key, unsigned int keylen)
4260 +{
4261 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4262 +       struct device *jrdev = ctx->jrdev;
4263 +       int ret;
4264 +
4265 +       if (keylen < 4)
4266 +               return -EINVAL;
4267 +
4268 +#ifdef DEBUG
4269 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4270 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4271 +#endif
4272 +
4273 +       memcpy(ctx->key, key, keylen);
4274 +       /*
4275 +        * The last four bytes of the key material are used as the salt value
4276 +        * in the nonce. Update the AES key length.
4277 +        */
4278 +       ctx->cdata.keylen = keylen - 4;
4279 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4280 +                                  ctx->dir);
4281 +
4282 +       ret = rfc4543_set_sh_desc(aead);
4283 +       if (ret)
4284 +               return ret;
4285 +
4286 +       /* Now update the driver contexts with the new shared descriptor */
4287 +       if (ctx->drv_ctx[ENCRYPT]) {
4288 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4289 +                                         ctx->sh_desc_enc);
4290 +               if (ret) {
4291 +                       dev_err(jrdev, "driver enc context update failed\n");
4292 +                       return ret;
4293 +               }
4294 +       }
4295 +
4296 +       if (ctx->drv_ctx[DECRYPT]) {
4297 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4298 +                                         ctx->sh_desc_dec);
4299 +               if (ret) {
4300 +                       dev_err(jrdev, "driver dec context update failed\n");
4301 +                       return ret;
4302 +               }
4303 +       }
4304 +
4305 +       return 0;
4306 +}
4307 +
4308  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
4309                              const u8 *key, unsigned int keylen)
4310  {
4311 @@ -414,6 +902,29 @@ struct aead_edesc {
4312  };
4313  
4314  /*
4315 + * tls_edesc - s/w-extended tls descriptor
4316 + * @src_nents: number of segments in input scatterlist
4317 + * @dst_nents: number of segments in output scatterlist
4318 + * @iv_dma: dma address of iv for checking continuity and link table
4319 + * @qm_sg_bytes: length of dma mapped h/w link table
4320 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
4321 + * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @dst: pointer to the output scatterlist (req->dst, advanced past assoclen)
4322 + * @drv_req: driver-specific request structure
4323 + * @sgt: the h/w link table, followed by IV
4324 + */
4325 +struct tls_edesc {
4326 +       int src_nents;
4327 +       int dst_nents;
4328 +       dma_addr_t iv_dma;
4329 +       int qm_sg_bytes;
4330 +       dma_addr_t qm_sg_dma;
4331 +       struct scatterlist tmp[2];
4332 +       struct scatterlist *dst;
4333 +       struct caam_drv_req drv_req;
4334 +       struct qm_sg_entry sgt[0];
4335 +};
4336 +
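As with aead_edesc, each tls_edesc and its variable-length tail are carved out of a single qi_cache allocation: the h/w link table starts at sgt[] and the DMA-able IV copy sits right after it. A sketch of the accounting, mirroring the bound the aead path checks against CAAM_QI_MEMCACHE_SIZE:

	/*
	 * [ struct tls_edesc | qm_sg_ents * sizeof(struct qm_sg_entry) | IV ]
	 *                      ^ sgt[0]                                  ^ iv
	 *
	 * offsetof(struct tls_edesc, sgt) + qm_sg_bytes + ivsize
	 * must stay <= CAAM_QI_MEMCACHE_SIZE
	 */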
4337 +/*
4338   * ablkcipher_edesc - s/w-extended ablkcipher descriptor
4339   * @src_nents: number of segments in input scatterlist
4340   * @dst_nents: number of segments in output scatterlist
4341 @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
4342         dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
4343  }
4344  
4345 +static void tls_unmap(struct device *dev,
4346 +                     struct tls_edesc *edesc,
4347 +                     struct aead_request *req)
4348 +{
4349 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
4350 +       int ivsize = crypto_aead_ivsize(aead);
4351 +
4352 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
4353 +                  edesc->dst_nents, edesc->iv_dma, ivsize,
4354 +                  edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
4355 +                  edesc->qm_sg_bytes);
4356 +}
4357 +
4358  static void ablkcipher_unmap(struct device *dev,
4359                              struct ablkcipher_edesc *edesc,
4360                              struct ablkcipher_request *req)
4361 @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
4362         qidev = caam_ctx->qidev;
4363  
4364         if (unlikely(status)) {
4365 +               u32 ssrc = status & JRSTA_SSRC_MASK;
4366 +               u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
4367 +
4368                 caam_jr_strstatus(qidev, status);
4369 -               ecode = -EIO;
4370 +               /*
4371 +                * return -EBADMSG if the h/w ICV check failed, -EIO otherwise
4372 +                */
4373 +               if (ssrc == JRSTA_SSRC_CCB_ERROR &&
4374 +                   err_id == JRSTA_CCBERR_ERRID_ICVCHK)
4375 +                       ecode = -EBADMSG;
4376 +               else
4377 +                       ecode = -EIO;
4378         }
4379  
4380         edesc = container_of(drv_req, typeof(*edesc), drv_req);
4381 @@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
4382         /*
4383          * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
4384          * Input is not contiguous.
4385 +        * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4386 +        * the end of the table by allocating more S/G entries. Logic:
4387 +        * if (src != dst && output S/G)
4388 +        *      pad output S/G, if needed
4389 +        * else if (src == dst && S/G)
4390 +        *      overlapping S/Gs; pad one of them
4391 +        * else if (input S/G) ...
4392 +        *      pad input S/G, if needed
4393          */
4394 -       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
4395 -                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4396 +       qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
4397 +       if (mapped_dst_nents > 1)
4398 +               qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4399 +       else if ((req->src == req->dst) && (mapped_src_nents > 1))
4400 +               qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4401 +                                1 + !!ivsize + ALIGN(mapped_src_nents, 4));
4402 +       else
4403 +               qm_sg_ents = ALIGN(qm_sg_ents, 4);
4404 +
4405         sg_table = &edesc->sgt[0];
4406         qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4407         if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
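The padding rule above is easiest to follow with numbers; a worked example for an out-of-place AEAD request with an IV:

	/*
	 * mapped_src_nents = 3, mapped_dst_nents = 5:
	 *   input entries  = 1 (assoclen) + 1 (IV) + 3 (src) = 5
	 *   output entries = ALIGN(5, 4) = 8, so the engine's 4-entry
	 *                    burst reads never run past the table
	 *   qm_sg_ents     = 5 + 8 = 13
	 */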
4408 @@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
4409         return aead_crypt(req, false);
4410  }
4411  
4412 +static int ipsec_gcm_encrypt(struct aead_request *req)
4413 +{
4414 +       if (req->assoclen < 8)
4415 +               return -EINVAL;
4416 +
4417 +       return aead_crypt(req, true);
4418 +}
4419 +
4420 +static int ipsec_gcm_decrypt(struct aead_request *req)
4421 +{
4422 +       if (req->assoclen < 8)
4423 +               return -EINVAL;
4424 +
4425 +       return aead_crypt(req, false);
4426 +}
4427 +
4428 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
4429 +{
4430 +       struct device *qidev;
4431 +       struct tls_edesc *edesc;
4432 +       struct aead_request *aead_req = drv_req->app_ctx;
4433 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
4434 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
4435 +       int ecode = 0;
4436 +
4437 +       qidev = caam_ctx->qidev;
4438 +
4439 +       if (unlikely(status)) {
4440 +               caam_jr_strstatus(qidev, status);
4441 +               ecode = -EIO;
4442 +       }
4443 +
4444 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
4445 +       tls_unmap(qidev, edesc, aead_req);
4446 +
4447 +       aead_request_complete(aead_req, ecode);
4448 +       qi_cache_free(edesc);
4449 +}
4450 +
4451 +/*
4452 + * allocate and map the tls extended descriptor
4453 + */
4454 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
4455 +{
4456 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
4457 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4458 +       unsigned int blocksize = crypto_aead_blocksize(aead);
4459 +       unsigned int padsize, authsize;
4460 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4461 +                                                typeof(*alg), aead);
4462 +       struct device *qidev = ctx->qidev;
4463 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4464 +                     GFP_KERNEL : GFP_ATOMIC;
4465 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4466 +       struct tls_edesc *edesc;
4467 +       dma_addr_t qm_sg_dma, iv_dma = 0;
4468 +       int ivsize = 0;
4469 +       u8 *iv;
4470 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
4471 +       int in_len, out_len;
4472 +       struct qm_sg_entry *sg_table, *fd_sgt;
4473 +       struct caam_drv_ctx *drv_ctx;
4474 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
4475 +       struct scatterlist *dst;
4476 +
4477 +       if (encrypt) {
4478 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
4479 +                                       blocksize);
4480 +               authsize = ctx->authsize + padsize;
4481 +       } else {
4482 +               authsize = ctx->authsize;
4483 +       }
4484 +
4485 +       drv_ctx = get_drv_ctx(ctx, op_type);
4486 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
4487 +               return (struct tls_edesc *)drv_ctx;
4488 +
4489 +       /* allocate space for base edesc, link tables and IV */
4490 +       edesc = qi_cache_alloc(GFP_DMA | flags);
4491 +       if (unlikely(!edesc)) {
4492 +               dev_err(qidev, "could not allocate extended descriptor\n");
4493 +               return ERR_PTR(-ENOMEM);
4494 +       }
4495 +
4496 +       if (likely(req->src == req->dst)) {
4497 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
4498 +                                            req->cryptlen +
4499 +                                            (encrypt ? authsize : 0));
4500 +               if (unlikely(src_nents < 0)) {
4501 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4502 +                               req->assoclen + req->cryptlen +
4503 +                               (encrypt ? authsize : 0));
4504 +                       qi_cache_free(edesc);
4505 +                       return ERR_PTR(src_nents);
4506 +               }
4507 +
4508 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
4509 +                                             DMA_BIDIRECTIONAL);
4510 +               if (unlikely(!mapped_src_nents)) {
4511 +                       dev_err(qidev, "unable to map source\n");
4512 +                       qi_cache_free(edesc);
4513 +                       return ERR_PTR(-ENOMEM);
4514 +               }
4515 +               dst = req->dst;
4516 +       } else {
4517 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
4518 +                                            req->cryptlen);
4519 +               if (unlikely(src_nents < 0)) {
4520 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4521 +                               req->assoclen + req->cryptlen);
4522 +                       qi_cache_free(edesc);
4523 +                       return ERR_PTR(src_nents);
4524 +               }
4525 +
4526 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
4527 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
4528 +                                            (encrypt ? authsize : 0));
4529 +               if (unlikely(dst_nents < 0)) {
4530 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
4531 +                               req->cryptlen +
4532 +                               (encrypt ? authsize : 0));
4533 +                       qi_cache_free(edesc);
4534 +                       return ERR_PTR(dst_nents);
4535 +               }
4536 +
4537 +               if (src_nents) {
4538 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
4539 +                                                     src_nents, DMA_TO_DEVICE);
4540 +                       if (unlikely(!mapped_src_nents)) {
4541 +                               dev_err(qidev, "unable to map source\n");
4542 +                               qi_cache_free(edesc);
4543 +                               return ERR_PTR(-ENOMEM);
4544 +                       }
4545 +               } else {
4546 +                       mapped_src_nents = 0;
4547 +               }
4548 +
4549 +               mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
4550 +                                             DMA_FROM_DEVICE);
4551 +               if (unlikely(!mapped_dst_nents)) {
4552 +                       dev_err(qidev, "unable to map destination\n");
4553 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
4554 +                       qi_cache_free(edesc);
4555 +                       return ERR_PTR(-ENOMEM);
4556 +               }
4557 +       }
4558 +
4559 +       /*
4560 +        * Create S/G table: IV, src, dst.
4561 +        * Input is not contiguous.
4562 +        */
4563 +       qm_sg_ents = 1 + mapped_src_nents +
4564 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4565 +       sg_table = &edesc->sgt[0];
4566 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4567 +
4568 +       ivsize = crypto_aead_ivsize(aead);
4569 +       iv = (u8 *)(sg_table + qm_sg_ents);
4570 +       /* Make sure IV is located in a DMAable area */
4571 +       memcpy(iv, req->iv, ivsize);
4572 +       iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
4573 +       if (dma_mapping_error(qidev, iv_dma)) {
4574 +               dev_err(qidev, "unable to map IV\n");
4575 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
4576 +                          0, 0);
4577 +               qi_cache_free(edesc);
4578 +               return ERR_PTR(-ENOMEM);
4579 +       }
4580 +
4581 +       edesc->src_nents = src_nents;
4582 +       edesc->dst_nents = dst_nents;
4583 +       edesc->dst = dst;
4584 +       edesc->iv_dma = iv_dma;
4585 +       edesc->drv_req.app_ctx = req;
4586 +       edesc->drv_req.cbk = tls_done;
4587 +       edesc->drv_req.drv_ctx = drv_ctx;
4588 +
4589 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
4590 +       qm_sg_index = 1;
4591 +
4592 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4593 +       qm_sg_index += mapped_src_nents;
4594 +
4595 +       if (mapped_dst_nents > 1)
4596 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
4597 +                                qm_sg_index, 0);
4598 +
4599 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4600 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
4601 +               dev_err(qidev, "unable to map S/G table\n");
4602 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
4603 +                          ivsize, op_type, 0, 0);
4604 +               qi_cache_free(edesc);
4605 +               return ERR_PTR(-ENOMEM);
4606 +       }
4607 +
4608 +       edesc->qm_sg_dma = qm_sg_dma;
4609 +       edesc->qm_sg_bytes = qm_sg_bytes;
4610 +
4611 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
4612 +       in_len = ivsize + req->assoclen + req->cryptlen;
4613 +
4614 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
4615 +
4616 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
4617 +
4618 +       if (req->dst == req->src)
4619 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
4620 +                                   (sg_nents_for_len(req->src, req->assoclen) +
4621 +                                    1) * sizeof(*sg_table), out_len, 0);
4622 +       else if (mapped_dst_nents == 1)
4623 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
4624 +       else
4625 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
4626 +                                    qm_sg_index, out_len, 0);
4627 +
4628 +       return edesc;
4629 +}
4630 +
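The padsize computed at the top of tls_edesc_alloc() is the full TLS 1.0 CBC padding, pad-length byte included. A worked example with AES (16-byte blocks) and HMAC-SHA1 (authsize 20):

	/*
	 * req->cryptlen = 100:
	 *   (100 + 20) % 16 = 8  ->  padsize = 16 - 8 = 8
	 *   ciphertext length = 100 + 20 + 8 = 128, a block multiple
	 * When (cryptlen + authsize) is already block-aligned, padsize is
	 * a full 16-byte block - TLS always pads by 1..blocksize bytes.
	 */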
4631 +static int tls_crypt(struct aead_request *req, bool encrypt)
4632 +{
4633 +       struct tls_edesc *edesc;
4634 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
4635 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
4636 +       int ret;
4637 +
4638 +       if (unlikely(caam_congested))
4639 +               return -EAGAIN;
4640 +
4641 +       edesc = tls_edesc_alloc(req, encrypt);
4642 +       if (IS_ERR_OR_NULL(edesc))
4643 +               return PTR_ERR(edesc);
4644 +
4645 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
4646 +       if (!ret) {
4647 +               ret = -EINPROGRESS;
4648 +       } else {
4649 +               tls_unmap(ctx->qidev, edesc, req);
4650 +               qi_cache_free(edesc);
4651 +       }
4652 +
4653 +       return ret;
4654 +}
4655 +
4656 +static int tls_encrypt(struct aead_request *req)
4657 +{
4658 +       return tls_crypt(req, true);
4659 +}
4660 +
4661 +static int tls_decrypt(struct aead_request *req)
4662 +{
4663 +       return tls_crypt(req, false);
4664 +}
4665 +
4666  static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
4667  {
4668         struct ablkcipher_edesc *edesc;
4669 @@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
4670         qm_sg_ents = 1 + mapped_src_nents;
4671         dst_sg_idx = qm_sg_ents;
4672  
4673 -       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
4674 +       /*
4675 +        * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4676 +        * the end of the table by allocating more S/G entries. Logic:
4677 +        * if (src != dst && output S/G)
4678 +        *      pad output S/G, if needed
4679 +        * else if (src == dst && S/G)
4680 +        *      overlapping S/Gs; pad one of them
4681 +        * else if (input S/G) ...
4682 +        *      pad input S/G, if needed
4683 +        */
4684 +       if (mapped_dst_nents > 1)
4685 +               qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4686 +       else if ((req->src == req->dst) && (mapped_src_nents > 1))
4687 +               qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4688 +                                1 + ALIGN(mapped_src_nents, 4));
4689 +       else
4690 +               qm_sg_ents = ALIGN(qm_sg_ents, 4);
4691 +
4692         qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
4693         if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
4694                      ivsize > CAAM_QI_MEMCACHE_SIZE)) {
4695 @@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
4696  };
4697  
4698  static struct caam_aead_alg driver_aeads[] = {
4699 +       {
4700 +               .aead = {
4701 +                       .base = {
4702 +                               .cra_name = "rfc4106(gcm(aes))",
4703 +                               .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
4704 +                               .cra_blocksize = 1,
4705 +                       },
4706 +                       .setkey = rfc4106_setkey,
4707 +                       .setauthsize = rfc4106_setauthsize,
4708 +                       .encrypt = ipsec_gcm_encrypt,
4709 +                       .decrypt = ipsec_gcm_decrypt,
4710 +                       .ivsize = 8,
4711 +                       .maxauthsize = AES_BLOCK_SIZE,
4712 +               },
4713 +               .caam = {
4714 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4715 +               },
4716 +       },
4717 +       {
4718 +               .aead = {
4719 +                       .base = {
4720 +                               .cra_name = "rfc4543(gcm(aes))",
4721 +                               .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
4722 +                               .cra_blocksize = 1,
4723 +                       },
4724 +                       .setkey = rfc4543_setkey,
4725 +                       .setauthsize = rfc4543_setauthsize,
4726 +                       .encrypt = ipsec_gcm_encrypt,
4727 +                       .decrypt = ipsec_gcm_decrypt,
4728 +                       .ivsize = 8,
4729 +                       .maxauthsize = AES_BLOCK_SIZE,
4730 +               },
4731 +               .caam = {
4732 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4733 +               },
4734 +       },
4735 +       /* Galois Counter Mode */
4736 +       {
4737 +               .aead = {
4738 +                       .base = {
4739 +                               .cra_name = "gcm(aes)",
4740 +                               .cra_driver_name = "gcm-aes-caam-qi",
4741 +                               .cra_blocksize = 1,
4742 +                       },
4743 +                       .setkey = gcm_setkey,
4744 +                       .setauthsize = gcm_setauthsize,
4745 +                       .encrypt = aead_encrypt,
4746 +                       .decrypt = aead_decrypt,
4747 +                       .ivsize = 12,
4748 +                       .maxauthsize = AES_BLOCK_SIZE,
4749 +               },
4750 +               .caam = {
4751 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4752 +               }
4753 +       },
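Once registered, these transforms are reached through the regular AEAD API. A minimal usage sketch (allocation checks and async completion handling elided; key, iv and the src/dst scatterlists are assumed to be set up by the caller):

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	crypto_aead_setkey(tfm, key, 16);            /* AES-128 */
	crypto_aead_setauthsize(tfm, 16);            /* full GCM tag */
	aead_request_set_ad(req, assoclen);          /* AAD precedes payload */
	aead_request_set_crypt(req, src, dst, cryptlen, iv); /* 12-byte IV */
	crypto_aead_encrypt(req);                    /* may be -EINPROGRESS */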
4754         /* single-pass ipsec_esp descriptor */
4755         {
4756                 .aead = {
4757 @@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
4758                         .geniv = true,
4759                 }
4760         },
4761 +       {
4762 +               .aead = {
4763 +                       .base = {
4764 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
4765 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
4766 +                               .cra_blocksize = AES_BLOCK_SIZE,
4767 +                       },
4768 +                       .setkey = tls_setkey,
4769 +                       .setauthsize = tls_setauthsize,
4770 +                       .encrypt = tls_encrypt,
4771 +                       .decrypt = tls_decrypt,
4772 +                       .ivsize = AES_BLOCK_SIZE,
4773 +                       .maxauthsize = SHA1_DIGEST_SIZE,
4774 +               },
4775 +               .caam = {
4776 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
4777 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4778 +                                          OP_ALG_AAI_HMAC_PRECOMP,
4779 +               }
4780 +       }
4781  };
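The tls10 template keeps the driver's convention of a fixed 13-byte associated-data block — the TLS MAC header (8-byte sequence number, 1-byte type, 2-byte version, 2-byte length). A hedged request sketch against it, with the same assumptions as the gcm(aes) sketch above:

	struct crypto_aead *tfm =
		crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	/*
	 * src = [ 13-byte TLS header | plaintext ]; on encrypt the driver
	 * appends the HMAC and CBC padding itself, so dst needs room for
	 * cryptlen + authsize + up to one extra cipher block.
	 */
	aead_request_set_ad(req, 13);
	aead_request_set_crypt(req, src, dst, plaintext_len, iv);
	crypto_aead_encrypt(req);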
4782  
4783  struct caam_crypto_alg {
4784 @@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
4785         struct caam_alg_entry caam;
4786  };
4787  
4788 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4789 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
4790 +                          bool uses_dkp)
4791  {
4792         struct caam_drv_private *priv;
4793 +       struct device *dev;
4794 +       /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
4795 +       static const u8 digest_size[] = {
4796 +               MD5_DIGEST_SIZE,
4797 +               SHA1_DIGEST_SIZE,
4798 +               SHA224_DIGEST_SIZE,
4799 +               SHA256_DIGEST_SIZE,
4800 +               SHA384_DIGEST_SIZE,
4801 +               SHA512_DIGEST_SIZE
4802 +       };
4803 +       u8 op_id;
4804  
4805         /*
4806          * distribute tfms across job rings to ensure in-order
4807 @@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
4808                 return PTR_ERR(ctx->jrdev);
4809         }
4810  
4811 -       ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
4812 -                                     DMA_TO_DEVICE);
4813 -       if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
4814 -               dev_err(ctx->jrdev, "unable to map key\n");
4815 +       priv = dev_get_drvdata(ctx->jrdev->parent);
4816 +       if (priv->era >= 6 && uses_dkp) {
4817 +               ctx->dir = DMA_BIDIRECTIONAL;
4818 +               dev = ctx->jrdev->parent;
4819 +       } else {
4820 +               ctx->dir = DMA_TO_DEVICE;
4821 +               dev = ctx->jrdev;
4822 +       }
4823 +
4824 +       ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
4825 +                                     ctx->dir);
4826 +       if (dma_mapping_error(dev, ctx->key_dma)) {
4827 +               dev_err(dev, "unable to map key\n");
4828                 caam_jr_free(ctx->jrdev);
4829                 return -ENOMEM;
4830         }
4831 @@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
4832         ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4833         ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4834  
4835 -       priv = dev_get_drvdata(ctx->jrdev->parent);
4836 -       ctx->qidev = priv->qidev;
4837 +       if (ctx->adata.algtype) {
4838 +               op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
4839 +                               >> OP_ALG_ALGSEL_SHIFT;
4840 +               if (op_id < ARRAY_SIZE(digest_size)) {
4841 +                       ctx->authsize = digest_size[op_id];
4842 +               } else {
4843 +                       dev_err(ctx->jrdev,
4844 +                               "incorrect op_id %d; must be less than %zu\n",
4845 +                               op_id, ARRAY_SIZE(digest_size));
4846 +                       caam_jr_free(ctx->jrdev);
4847 +                       return -EINVAL;
4848 +               }
4849 +       } else {
4850 +               ctx->authsize = 0;
4851 +       }
4852 +
4853 +       ctx->qidev = ctx->jrdev->parent;
4854  
4855         spin_lock_init(&ctx->lock);
4856         ctx->drv_ctx[ENCRYPT] = NULL;
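caam_init_common() now seeds a default authsize from the class-2 algorithm selector: the low bits of the ALGSEL field index digest_size[]. A worked example, assuming the usual CAAM selector encoding where MD5..SHA512 occupy consecutive sub-IDs 0..5:

	/*
	 * hmac(sha1): adata.algtype carries OP_ALG_ALGSEL_SHA1
	 *   op_id = (algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT
	 *         = 1
	 *   ctx->authsize = digest_size[1] = SHA1_DIGEST_SIZE = 20
	 * Selectors outside digest_size[] are rejected with -EINVAL above.
	 */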
4857 @@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
4858                                                         crypto_alg);
4859         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4860  
4861 -       return caam_init_common(ctx, &caam_alg->caam);
4862 +       return caam_init_common(ctx, &caam_alg->caam, false);
4863  }
4864  
4865  static int caam_aead_init(struct crypto_aead *tfm)
4866 @@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
4867                                                       aead);
4868         struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4869  
4870 -       return caam_init_common(ctx, &caam_alg->caam);
4871 +       return caam_init_common(ctx, &caam_alg->caam,
4872 +                               (alg->setkey == aead_setkey) ||
4873 +                               (alg->setkey == tls_setkey));
4874  }
4875  
4876  static void caam_exit_common(struct caam_ctx *ctx)
4877  {
4878 +       struct device *dev;
4879 +
4880         caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
4881         caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
4882         caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
4883  
4884 -       dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
4885 -                        DMA_TO_DEVICE);
4886 +       if (ctx->dir == DMA_BIDIRECTIONAL)
4887 +               dev = ctx->jrdev->parent;
4888 +       else
4889 +               dev = ctx->jrdev;
4890 +
4891 +       dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
4892  
4893         caam_jr_free(ctx->jrdev);
4894  }
4895 @@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
4896  }
4897  
4898  static struct list_head alg_list;
4899 -static void __exit caam_qi_algapi_exit(void)
4900 +void caam_qi_algapi_exit(void)
4901  {
4902         struct caam_crypto_alg *t_alg, *n;
4903         int i;
4904 @@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
4905         alg->exit = caam_aead_exit;
4906  }
4907  
4908 -static int __init caam_qi_algapi_init(void)
4909 +int caam_qi_algapi_init(struct device *ctrldev)
4910  {
4911 -       struct device_node *dev_node;
4912 -       struct platform_device *pdev;
4913 -       struct device *ctrldev;
4914 -       struct caam_drv_private *priv;
4915 +       struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
4916         int i = 0, err = 0;
4917 -       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4918 +       u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
4919         unsigned int md_limit = SHA512_DIGEST_SIZE;
4920         bool registered = false;
4921  
4922 -       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4923 -       if (!dev_node) {
4924 -               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4925 -               if (!dev_node)
4926 -                       return -ENODEV;
4927 -       }
4928 -
4929 -       pdev = of_find_device_by_node(dev_node);
4930 -       of_node_put(dev_node);
4931 -       if (!pdev)
4932 -               return -ENODEV;
4933 -
4934 -       ctrldev = &pdev->dev;
4935 -       priv = dev_get_drvdata(ctrldev);
4936 -
4937 -       /*
4938 -        * If priv is NULL, it's probably because the caam driver wasn't
4939 -        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4940 -        */
4941 -       if (!priv || !priv->qi_present)
4942 -               return -ENODEV;
4943 -
4944         INIT_LIST_HEAD(&alg_list);
4945  
4946         /*
4947          * Register crypto algorithms the device supports.
4948          * First, detect presence and attributes of DES, AES, and MD blocks.
4949          */
4950 -       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4951 -       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4952 -       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4953 -       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4954 -       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4955 +       if (priv->era < 10) {
4956 +               u32 cha_vid, cha_inst;
4957 +
4958 +               cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4959 +               aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
4960 +               md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4961 +
4962 +               cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4963 +               des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
4964 +                          CHA_ID_LS_DES_SHIFT;
4965 +               aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
4966 +               md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4967 +       } else {
4968 +               u32 aesa, mdha;
4969 +
4970 +               aesa = rd_reg32(&priv->ctrl->vreg.aesa);
4971 +               mdha = rd_reg32(&priv->ctrl->vreg.mdha);
4972 +
4973 +               aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4974 +               md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4975 +
4976 +               des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
4977 +               aes_inst = aesa & CHA_VER_NUM_MASK;
4978 +               md_inst = mdha & CHA_VER_NUM_MASK;
4979 +       }
4980  
4981         /* If MD is present, limit digest size based on LP256 */
4982 -       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4983 +       if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
4984                 md_limit = SHA256_DIGEST_SIZE;
4985  
4986         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4987 @@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
4988                 t_alg = caam_alg_alloc(alg);
4989                 if (IS_ERR(t_alg)) {
4990                         err = PTR_ERR(t_alg);
4991 -                       dev_warn(priv->qidev, "%s alg allocation failed\n",
4992 +                       dev_warn(ctrldev, "%s alg allocation failed\n",
4993                                  alg->driver_name);
4994                         continue;
4995                 }
4996  
4997                 err = crypto_register_alg(&t_alg->crypto_alg);
4998                 if (err) {
4999 -                       dev_warn(priv->qidev, "%s alg registration failed\n",
5000 +                       dev_warn(ctrldev, "%s alg registration failed\n",
5001                                  t_alg->crypto_alg.cra_driver_name);
5002                         kfree(t_alg);
5003                         continue;
5004 @@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
5005                  * Check support for AES algorithms not available
5006                  * on LP devices.
5007                  */
5008 -               if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
5009 -                   (alg_aai == OP_ALG_AAI_GCM))
5010 +               if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
5011                         continue;
5012  
5013                 /*
5014 @@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
5015         }
5016  
5017         if (registered)
5018 -               dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
5019 +               dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
5020  
5021         return err;
5022  }
5023 -
5024 -module_init(caam_qi_algapi_init);
5025 -module_exit(caam_qi_algapi_exit);
5026 -
5027 -MODULE_LICENSE("GPL");
5028 -MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
5029 -MODULE_AUTHOR("Freescale Semiconductor");
5030 --- /dev/null
5031 +++ b/drivers/crypto/caam/caamalg_qi2.c
5032 @@ -0,0 +1,5843 @@
5033 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5034 +/*
5035 + * Copyright 2015-2016 Freescale Semiconductor Inc.
5036 + * Copyright 2017-2018 NXP
5037 + */
5038 +
5039 +#include <linux/fsl/mc.h>
5040 +#include "compat.h"
5041 +#include "regs.h"
5042 +#include "caamalg_qi2.h"
5043 +#include "dpseci_cmd.h"
5044 +#include "desc_constr.h"
5045 +#include "error.h"
5046 +#include "sg_sw_sec4.h"
5047 +#include "sg_sw_qm2.h"
5048 +#include "key_gen.h"
5049 +#include "caamalg_desc.h"
5050 +#include "caamhash_desc.h"
5051 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
5052 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
5053 +
5054 +#define CAAM_CRA_PRIORITY      2000
5055 +
5056 +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
5057 +#define CAAM_MAX_KEY_SIZE      (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
5058 +                                SHA512_DIGEST_SIZE * 2)
5059 +
5060 +/*
5061 + * This is a cache of buffers from which users of the CAAM QI driver
5062 + * can allocate short buffers. It's faster than doing kmalloc on the hotpath.
5063 + * NOTE: A more elegant solution would be to have some headroom in the frames
5064 + *       being processed. This could be added by the dpaa2-eth driver, but it
5065 + *       would pose a problem for userspace application processing, which
5066 + *       cannot know of this limitation. So for now, this will work.
5067 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
5068 + */
5069 +static struct kmem_cache *qi_cache;
5070 +
5071 +struct caam_alg_entry {
5072 +       struct device *dev;
5073 +       int class1_alg_type;
5074 +       int class2_alg_type;
5075 +       bool rfc3686;
5076 +       bool geniv;
5077 +};
5078 +
5079 +struct caam_aead_alg {
5080 +       struct aead_alg aead;
5081 +       struct caam_alg_entry caam;
5082 +       bool registered;
5083 +};
5084 +
5085 +struct caam_skcipher_alg {
5086 +       struct skcipher_alg skcipher;
5087 +       struct caam_alg_entry caam;
5088 +       bool registered;
5089 +};
5090 +
5091 +/**
5092 + * caam_ctx - per-session context
5093 + * @flc: Flow Contexts array
5094 + * @key: virtual address of the key(s): [authentication key], encryption key
5095 + * @flc_dma: I/O virtual addresses of the Flow Contexts
5096 + * @key_dma: I/O virtual address of the key
5097 + * @dir: DMA direction for mapping key and Flow Contexts
5098 + * @dev: dpseci device
5099 + * @adata: authentication algorithm details
5100 + * @cdata: encryption algorithm details
5101 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
5102 + */
5103 +struct caam_ctx {
5104 +       struct caam_flc flc[NUM_OP];
5105 +       u8 key[CAAM_MAX_KEY_SIZE];
5106 +       dma_addr_t flc_dma[NUM_OP];
5107 +       dma_addr_t key_dma;
5108 +       enum dma_data_direction dir;
5109 +       struct device *dev;
5110 +       struct alginfo adata;
5111 +       struct alginfo cdata;
5112 +       unsigned int authsize;
5113 +};
5114 +
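+/*
+ * Translate an I/O virtual address back to a CPU virtual address: resolve it
+ * through the IOMMU domain when one is attached, otherwise the IOVA is
+ * already a physical address.
+ */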
5115 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
5116 +                             dma_addr_t iova_addr)
5117 +{
5118 +       phys_addr_t phys_addr;
5119 +
5120 +       phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
5121 +                                  iova_addr;
5122 +
5123 +       return phys_to_virt(phys_addr);
5124 +}
5125 +
5126 +/*
5127 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
5128 + *
5129 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
5130 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
5131 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
5132 + * hosting 16 SG entries.
5133 + *
5134 + * @flags - flags that would be used for the equivalent kmalloc(..) call
5135 + *
5136 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
5137 + */
5138 +static inline void *qi_cache_zalloc(gfp_t flags)
5139 +{
5140 +       return kmem_cache_zalloc(qi_cache, flags);
5141 +}
5142 +
5143 +/*
5144 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
5145 + *
5146 + * @obj - buffer previously allocated by qi_cache_zalloc
5147 + *
5148 + * No checking is done; the call is passed straight through to
5149 + * kmem_cache_free(...)
5150 + */
5151 +static inline void qi_cache_free(void *obj)
5152 +{
5153 +       kmem_cache_free(qi_cache, obj);
5154 +}
5155 +
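+/* Retrieve the driver's per-request context embedded in the crypto request */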
5156 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
5157 +{
5158 +       switch (crypto_tfm_alg_type(areq->tfm)) {
5159 +       case CRYPTO_ALG_TYPE_SKCIPHER:
5160 +               return skcipher_request_ctx(skcipher_request_cast(areq));
5161 +       case CRYPTO_ALG_TYPE_AEAD:
5162 +               return aead_request_ctx(container_of(areq, struct aead_request,
5163 +                                                    base));
5164 +       case CRYPTO_ALG_TYPE_AHASH:
5165 +               return ahash_request_ctx(ahash_request_cast(areq));
5166 +       default:
5167 +               return ERR_PTR(-EINVAL);
5168 +       }
5169 +}
5170 +
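+/*
+ * Undo the DMA mappings set up for a request: the src/dst scatterlists, the
+ * IV buffer and the QMan S/G table.
+ */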
5171 +static void caam_unmap(struct device *dev, struct scatterlist *src,
5172 +                      struct scatterlist *dst, int src_nents,
5173 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
5174 +                      dma_addr_t qm_sg_dma, int qm_sg_bytes)
5175 +{
5176 +       if (dst != src) {
5177 +               if (src_nents)
5178 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
5179 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
5180 +       } else {
5181 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
5182 +       }
5183 +
5184 +       if (iv_dma)
5185 +               dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
5186 +
5187 +       if (qm_sg_bytes)
5188 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
5189 +}
5190 +
5191 +static int aead_set_sh_desc(struct crypto_aead *aead)
5192 +{
5193 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5194 +                                                typeof(*alg), aead);
5195 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5196 +       unsigned int ivsize = crypto_aead_ivsize(aead);
5197 +       struct device *dev = ctx->dev;
5198 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5199 +       struct caam_flc *flc;
5200 +       u32 *desc;
5201 +       u32 ctx1_iv_off = 0;
5202 +       u32 *nonce = NULL;
5203 +       unsigned int data_len[2];
5204 +       u32 inl_mask;
5205 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
5206 +                              OP_ALG_AAI_CTR_MOD128);
5207 +       const bool is_rfc3686 = alg->caam.rfc3686;
5208 +
5209 +       if (!ctx->cdata.keylen || !ctx->authsize)
5210 +               return 0;
5211 +
5212 +       /*
5213 +        * AES-CTR needs to load the IV in the CONTEXT1 reg
5214 +        * at an offset of 128 bits (16 bytes):
5215 +        * CONTEXT1[255:128] = IV
5216 +        */
5217 +       if (ctr_mode)
5218 +               ctx1_iv_off = 16;
5219 +
5220 +       /*
5221 +        * RFC3686 specific:
5222 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
5223 +        */
5224 +       if (is_rfc3686) {
5225 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
5226 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
5227 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
5228 +       }
5229 +
5230 +       data_len[0] = ctx->adata.keylen_pad;
5231 +       data_len[1] = ctx->cdata.keylen;
5232 +
5233 +       /* aead_encrypt shared descriptor */
5234 +       if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
5235 +                                                DESC_QI_AEAD_ENC_LEN) +
5236 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5237 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
5238 +                             ARRAY_SIZE(data_len)) < 0)
5239 +               return -EINVAL;
5240 +
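+       /*
+        * inl_mask bit 0 set: the (split) authentication key fits inline in
+        * the descriptor; bit 1 set: the cipher key does.
+        */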
5241 +       if (inl_mask & 1)
5242 +               ctx->adata.key_virt = ctx->key;
5243 +       else
5244 +               ctx->adata.key_dma = ctx->key_dma;
5245 +
5246 +       if (inl_mask & 2)
5247 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5248 +       else
5249 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5250 +
5251 +       ctx->adata.key_inline = !!(inl_mask & 1);
5252 +       ctx->cdata.key_inline = !!(inl_mask & 2);
5253 +
5254 +       flc = &ctx->flc[ENCRYPT];
5255 +       desc = flc->sh_desc;
5256 +
5257 +       if (alg->caam.geniv)
5258 +               cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
5259 +                                         ivsize, ctx->authsize, is_rfc3686,
5260 +                                         nonce, ctx1_iv_off, true,
5261 +                                         priv->sec_attr.era);
5262 +       else
5263 +               cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
5264 +                                      ivsize, ctx->authsize, is_rfc3686, nonce,
5265 +                                      ctx1_iv_off, true, priv->sec_attr.era);
5266 +
5267 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5268 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5269 +                                  sizeof(flc->flc) + desc_bytes(desc),
5270 +                                  ctx->dir);
5271 +
5272 +       /* aead_decrypt shared descriptor */
5273 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
5274 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5275 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
5276 +                             ARRAY_SIZE(data_len)) < 0)
5277 +               return -EINVAL;
5278 +
5279 +       if (inl_mask & 1)
5280 +               ctx->adata.key_virt = ctx->key;
5281 +       else
5282 +               ctx->adata.key_dma = ctx->key_dma;
5283 +
5284 +       if (inl_mask & 2)
5285 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5286 +       else
5287 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5288 +
5289 +       ctx->adata.key_inline = !!(inl_mask & 1);
5290 +       ctx->cdata.key_inline = !!(inl_mask & 2);
5291 +
5292 +       flc = &ctx->flc[DECRYPT];
5293 +       desc = flc->sh_desc;
5294 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
5295 +                              ivsize, ctx->authsize, alg->caam.geniv,
5296 +                              is_rfc3686, nonce, ctx1_iv_off, true,
5297 +                              priv->sec_attr.era);
5298 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5299 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5300 +                                  sizeof(flc->flc) + desc_bytes(desc),
5301 +                                  ctx->dir);
5302 +
5303 +       return 0;
5304 +}
5305 +
5306 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
5307 +{
5308 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5309 +
5310 +       ctx->authsize = authsize;
5311 +       aead_set_sh_desc(authenc);
5312 +
5313 +       return 0;
5314 +}
5315 +
5316 +struct split_key_sh_result {
5317 +       struct completion completion;
5318 +       int err;
5319 +       struct device *dev;
5320 +};
5321 +
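+/* Completion callback for the split key generation job */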
5322 +static void split_key_sh_done(void *cbk_ctx, u32 err)
5323 +{
5324 +       struct split_key_sh_result *res = cbk_ctx;
5325 +
5326 +#ifdef DEBUG
5327 +       dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
5328 +#endif
5329 +
5330 +       if (err)
5331 +               caam_qi2_strstatus(res->dev, err);
5332 +
5333 +       res->err = err;
5334 +       complete(&res->completion);
5335 +}
5336 +
5337 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
5338 +                      unsigned int keylen)
5339 +{
5340 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5341 +       struct device *dev = ctx->dev;
5342 +       struct crypto_authenc_keys keys;
5343 +
5344 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5345 +               goto badkey;
5346 +
5347 +#ifdef DEBUG
5348 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5349 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
5350 +               keys.authkeylen);
5351 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5352 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5353 +#endif
5354 +
5355 +       ctx->adata.keylen = keys.authkeylen;
5356 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5357 +                                             OP_ALG_ALGSEL_MASK);
5358 +
5359 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5360 +               goto badkey;
5361 +
5362 +       memcpy(ctx->key, keys.authkey, keys.authkeylen);
5363 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5364 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5365 +                                  keys.enckeylen, ctx->dir);
5366 +#ifdef DEBUG
5367 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5368 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5369 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
5370 +#endif
5371 +
5372 +       ctx->cdata.keylen = keys.enckeylen;
5373 +
5374 +       return aead_set_sh_desc(aead);
5375 +badkey:
5376 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5377 +       return -EINVAL;
5378 +}
5379 +
5380 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
5381 +                                          bool encrypt)
5382 +{
5383 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
5384 +       struct caam_request *req_ctx = aead_request_ctx(req);
5385 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5386 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5387 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5388 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5389 +                                                typeof(*alg), aead);
5390 +       struct device *dev = ctx->dev;
5391 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5392 +                     GFP_KERNEL : GFP_ATOMIC;
5393 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5394 +       struct aead_edesc *edesc;
5395 +       dma_addr_t qm_sg_dma, iv_dma = 0;
5396 +       int ivsize = 0;
5397 +       unsigned int authsize = ctx->authsize;
5398 +       int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
5399 +       int in_len, out_len;
5400 +       struct dpaa2_sg_entry *sg_table;
5401 +
5402 +       /* allocate space for base edesc, link tables and IV */
5403 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
5404 +       if (unlikely(!edesc)) {
5405 +               dev_err(dev, "could not allocate extended descriptor\n");
5406 +               return ERR_PTR(-ENOMEM);
5407 +       }
5408 +
5409 +       if (unlikely(req->dst != req->src)) {
5410 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
5411 +                                            req->cryptlen);
5412 +               if (unlikely(src_nents < 0)) {
5413 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5414 +                               req->assoclen + req->cryptlen);
5415 +                       qi_cache_free(edesc);
5416 +                       return ERR_PTR(src_nents);
5417 +               }
5418 +
5419 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
5420 +                                            req->cryptlen +
5421 +                                            (encrypt ? authsize :
5422 +                                                       (-authsize)));
5423 +               if (unlikely(dst_nents < 0)) {
5424 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5425 +                               req->assoclen + req->cryptlen +
5426 +                               (encrypt ? authsize : (-authsize)));
5427 +                       qi_cache_free(edesc);
5428 +                       return ERR_PTR(dst_nents);
5429 +               }
5430 +
5431 +               if (src_nents) {
5432 +                       mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5433 +                                                     DMA_TO_DEVICE);
5434 +                       if (unlikely(!mapped_src_nents)) {
5435 +                               dev_err(dev, "unable to map source\n");
5436 +                               qi_cache_free(edesc);
5437 +                               return ERR_PTR(-ENOMEM);
5438 +                       }
5439 +               } else {
5440 +                       mapped_src_nents = 0;
5441 +               }
5442 +
5443 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
5444 +                                             DMA_FROM_DEVICE);
5445 +               if (unlikely(!mapped_dst_nents)) {
5446 +                       dev_err(dev, "unable to map destination\n");
5447 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5448 +                       qi_cache_free(edesc);
5449 +                       return ERR_PTR(-ENOMEM);
5450 +               }
5451 +       } else {
5452 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
5453 +                                            req->cryptlen +
5454 +                                            (encrypt ? authsize : 0));
5455 +               if (unlikely(src_nents < 0)) {
5456 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5457 +                               req->assoclen + req->cryptlen +
5458 +                               (encrypt ? authsize : 0));
5459 +                       qi_cache_free(edesc);
5460 +                       return ERR_PTR(src_nents);
5461 +               }
5462 +
5463 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5464 +                                             DMA_BIDIRECTIONAL);
5465 +               if (unlikely(!mapped_src_nents)) {
5466 +                       dev_err(dev, "unable to map source\n");
5467 +                       qi_cache_free(edesc);
5468 +                       return ERR_PTR(-ENOMEM);
5469 +               }
5470 +       }
5471 +
5472 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
5473 +               ivsize = crypto_aead_ivsize(aead);
5474 +
5475 +       /*
5476 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
5477 +        * Input is not contiguous.
5478 +        */
5479 +       qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
5480 +                     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5481 +       sg_table = &edesc->sgt[0];
5482 +       qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
5483 +       if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
5484 +                    CAAM_QI_MEMCACHE_SIZE)) {
5485 +               dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
5486 +                       qm_sg_nents, ivsize);
5487 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5488 +                          0, 0, 0);
5489 +               qi_cache_free(edesc);
5490 +               return ERR_PTR(-ENOMEM);
5491 +       }
5492 +
5493 +       if (ivsize) {
5494 +               u8 *iv = (u8 *)(sg_table + qm_sg_nents);
5495 +
5496 +               /* Make sure IV is located in a DMAable area */
5497 +               memcpy(iv, req->iv, ivsize);
5498 +
5499 +               iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5500 +               if (dma_mapping_error(dev, iv_dma)) {
5501 +                       dev_err(dev, "unable to map IV\n");
5502 +                       caam_unmap(dev, req->src, req->dst, src_nents,
5503 +                                  dst_nents, 0, 0, 0, 0);
5504 +                       qi_cache_free(edesc);
5505 +                       return ERR_PTR(-ENOMEM);
5506 +               }
5507 +       }
5508 +
5509 +       edesc->src_nents = src_nents;
5510 +       edesc->dst_nents = dst_nents;
5511 +       edesc->iv_dma = iv_dma;
5512 +
5513 +       if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
5514 +           OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
5515 +               /*
5516 +                * The associated data already includes the IV, but we
5517 +                * need to skip it when we authenticate or encrypt...
5518 +                */
5519 +               edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
5520 +       else
5521 +               edesc->assoclen = cpu_to_caam32(req->assoclen);
5522 +       edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
5523 +                                            DMA_TO_DEVICE);
5524 +       if (dma_mapping_error(dev, edesc->assoclen_dma)) {
5525 +               dev_err(dev, "unable to map assoclen\n");
5526 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5527 +                          iv_dma, ivsize, 0, 0);
5528 +               qi_cache_free(edesc);
5529 +               return ERR_PTR(-ENOMEM);
5530 +       }
5531 +
5532 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
5533 +       qm_sg_index++;
5534 +       if (ivsize) {
5535 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
5536 +               qm_sg_index++;
5537 +       }
5538 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5539 +       qm_sg_index += mapped_src_nents;
5540 +
5541 +       if (mapped_dst_nents > 1)
5542 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
5543 +                                qm_sg_index, 0);
5544 +
5545 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5546 +       if (dma_mapping_error(dev, qm_sg_dma)) {
5547 +               dev_err(dev, "unable to map S/G table\n");
5548 +               dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
5549 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5550 +                          iv_dma, ivsize, 0, 0);
5551 +               qi_cache_free(edesc);
5552 +               return ERR_PTR(-ENOMEM);
5553 +       }
5554 +
5555 +       edesc->qm_sg_dma = qm_sg_dma;
5556 +       edesc->qm_sg_bytes = qm_sg_bytes;
5557 +
5558 +       out_len = req->assoclen + req->cryptlen +
5559 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
5560 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
5561 +
5562 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5563 +       dpaa2_fl_set_final(in_fle, true);
5564 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5565 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5566 +       dpaa2_fl_set_len(in_fle, in_len);
5567 +
5568 +       if (req->dst == req->src) {
5569 +               if (mapped_src_nents == 1) {
5570 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5571 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
5572 +               } else {
5573 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
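+                       /* skip the assoclen and (optional) IV S/G entries */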
5574 +                       dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5575 +                                         (1 + !!ivsize) * sizeof(*sg_table));
5576 +               }
5577 +       } else if (mapped_dst_nents == 1) {
5578 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5579 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
5580 +       } else {
5581 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5582 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5583 +                                 sizeof(*sg_table));
5584 +       }
5585 +
5586 +       dpaa2_fl_set_len(out_fle, out_len);
5587 +
5588 +       return edesc;
5589 +}
5590 +
5591 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
5592 +{
5593 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5594 +       unsigned int ivsize = crypto_aead_ivsize(aead);
5595 +       struct device *dev = ctx->dev;
5596 +       struct caam_flc *flc;
5597 +       u32 *desc;
5598 +
5599 +       if (!ctx->cdata.keylen || !ctx->authsize)
5600 +               return 0;
5601 +
5602 +       flc = &ctx->flc[ENCRYPT];
5603 +       desc = flc->sh_desc;
5604 +       cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5605 +                              ctx->authsize, true, true);
5606 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5607 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5608 +                                  sizeof(flc->flc) + desc_bytes(desc),
5609 +                                  ctx->dir);
5610 +
5611 +       flc = &ctx->flc[DECRYPT];
5612 +       desc = flc->sh_desc;
5613 +       cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5614 +                              ctx->authsize, false, true);
5615 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5616 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5617 +                                  sizeof(flc->flc) + desc_bytes(desc),
5618 +                                  ctx->dir);
5619 +
5620 +       return 0;
5621 +}
5622 +
5623 +static int chachapoly_setauthsize(struct crypto_aead *aead,
5624 +                                 unsigned int authsize)
5625 +{
5626 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5627 +
5628 +       if (authsize != POLY1305_DIGEST_SIZE)
5629 +               return -EINVAL;
5630 +
5631 +       ctx->authsize = authsize;
5632 +       return chachapoly_set_sh_desc(aead);
5633 +}
5634 +
5635 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
5636 +                            unsigned int keylen)
5637 +{
5638 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5639 +       unsigned int ivsize = crypto_aead_ivsize(aead);
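+       /* the salt is the part of the 12-byte nonce not carried per-request */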
5640 +       unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
5641 +
5642 +       if (keylen != CHACHA20_KEY_SIZE + saltlen) {
5643 +               crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5644 +               return -EINVAL;
5645 +       }
5646 +
5647 +       ctx->cdata.key_virt = key;
5648 +       ctx->cdata.keylen = keylen - saltlen;
5649 +
5650 +       return chachapoly_set_sh_desc(aead);
5651 +}
5652 +
5653 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
5654 +                                        bool encrypt)
5655 +{
5656 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
5657 +       unsigned int blocksize = crypto_aead_blocksize(tls);
5658 +       unsigned int padsize, authsize;
5659 +       struct caam_request *req_ctx = aead_request_ctx(req);
5660 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5661 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5662 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
5663 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
5664 +                                                typeof(*alg), aead);
5665 +       struct device *dev = ctx->dev;
5666 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5667 +                     GFP_KERNEL : GFP_ATOMIC;
5668 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5669 +       struct tls_edesc *edesc;
5670 +       dma_addr_t qm_sg_dma, iv_dma = 0;
5671 +       int ivsize = 0;
5672 +       u8 *iv;
5673 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
5674 +       int in_len, out_len;
5675 +       struct dpaa2_sg_entry *sg_table;
5676 +       struct scatterlist *dst;
5677 +
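+       /*
+        * For encryption, the output also carries the CBC padding that rounds
+        * cryptlen + ICV up to a full cipher block, so fold it into authsize.
+        */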
5678 +       if (encrypt) {
5679 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
5680 +                                       blocksize);
5681 +               authsize = ctx->authsize + padsize;
5682 +       } else {
5683 +               authsize = ctx->authsize;
5684 +       }
5685 +
5686 +       /* allocate space for base edesc, link tables and IV */
5687 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
5688 +       if (unlikely(!edesc)) {
5689 +               dev_err(dev, "could not allocate extended descriptor\n");
5690 +               return ERR_PTR(-ENOMEM);
5691 +       }
5692 +
5693 +       if (likely(req->src == req->dst)) {
5694 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
5695 +                                            req->cryptlen +
5696 +                                            (encrypt ? authsize : 0));
5697 +               if (unlikely(src_nents < 0)) {
5698 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5699 +                               req->assoclen + req->cryptlen +
5700 +                               (encrypt ? authsize : 0));
5701 +                       qi_cache_free(edesc);
5702 +                       return ERR_PTR(src_nents);
5703 +               }
5704 +
5705 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5706 +                                             DMA_BIDIRECTIONAL);
5707 +               if (unlikely(!mapped_src_nents)) {
5708 +                       dev_err(dev, "unable to map source\n");
5709 +                       qi_cache_free(edesc);
5710 +                       return ERR_PTR(-ENOMEM);
5711 +               }
5712 +               dst = req->dst;
5713 +       } else {
5714 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
5715 +                                            req->cryptlen);
5716 +               if (unlikely(src_nents < 0)) {
5717 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5718 +                               req->assoclen + req->cryptlen);
5719 +                       qi_cache_free(edesc);
5720 +                       return ERR_PTR(src_nents);
5721 +               }
5722 +
5723 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
5724 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
5725 +                                            (encrypt ? authsize : 0));
5726 +               if (unlikely(dst_nents < 0)) {
5727 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5728 +                               req->cryptlen +
5729 +                               (encrypt ? authsize : 0));
5730 +                       qi_cache_free(edesc);
5731 +                       return ERR_PTR(dst_nents);
5732 +               }
5733 +
5734 +               if (src_nents) {
5735 +                       mapped_src_nents = dma_map_sg(dev, req->src,
5736 +                                                     src_nents, DMA_TO_DEVICE);
5737 +                       if (unlikely(!mapped_src_nents)) {
5738 +                               dev_err(dev, "unable to map source\n");
5739 +                               qi_cache_free(edesc);
5740 +                               return ERR_PTR(-ENOMEM);
5741 +                       }
5742 +               } else {
5743 +                       mapped_src_nents = 0;
5744 +               }
5745 +
5746 +               mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
5747 +                                             DMA_FROM_DEVICE);
5748 +               if (unlikely(!mapped_dst_nents)) {
5749 +                       dev_err(dev, "unable to map destination\n");
5750 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5751 +                       qi_cache_free(edesc);
5752 +                       return ERR_PTR(-ENOMEM);
5753 +               }
5754 +       }
5755 +
5756 +       /*
5757 +        * Create S/G table: IV, src, dst.
5758 +        * Input is not contiguous.
5759 +        */
5760 +       qm_sg_ents = 1 + mapped_src_nents +
5761 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5762 +       sg_table = &edesc->sgt[0];
5763 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
5764 +
5765 +       ivsize = crypto_aead_ivsize(tls);
5766 +       iv = (u8 *)(sg_table + qm_sg_ents);
5767 +       /* Make sure IV is located in a DMAable area */
5768 +       memcpy(iv, req->iv, ivsize);
5769 +       iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5770 +       if (dma_mapping_error(dev, iv_dma)) {
5771 +               dev_err(dev, "unable to map IV\n");
5772 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
5773 +                          0);
5774 +               qi_cache_free(edesc);
5775 +               return ERR_PTR(-ENOMEM);
5776 +       }
5777 +
5778 +       edesc->src_nents = src_nents;
5779 +       edesc->dst_nents = dst_nents;
5780 +       edesc->dst = dst;
5781 +       edesc->iv_dma = iv_dma;
5782 +
5783 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
5784 +       qm_sg_index = 1;
5785 +
5786 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5787 +       qm_sg_index += mapped_src_nents;
5788 +
5789 +       if (mapped_dst_nents > 1)
5790 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
5791 +                                qm_sg_index, 0);
5792 +
5793 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5794 +       if (dma_mapping_error(dev, qm_sg_dma)) {
5795 +               dev_err(dev, "unable to map S/G table\n");
5796 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
5797 +                          ivsize, 0, 0);
5798 +               qi_cache_free(edesc);
5799 +               return ERR_PTR(-ENOMEM);
5800 +       }
5801 +
5802 +       edesc->qm_sg_dma = qm_sg_dma;
5803 +       edesc->qm_sg_bytes = qm_sg_bytes;
5804 +
5805 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
5806 +       in_len = ivsize + req->assoclen + req->cryptlen;
5807 +
5808 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5809 +       dpaa2_fl_set_final(in_fle, true);
5810 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5811 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5812 +       dpaa2_fl_set_len(in_fle, in_len);
5813 +
5814 +       if (req->dst == req->src) {
5815 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
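+               /* output starts past the IV entry and the S/G entries that
+                * cover the associated data
+                */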
5816 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5817 +                                 (sg_nents_for_len(req->src, req->assoclen) +
5818 +                                  1) * sizeof(*sg_table));
5819 +       } else if (mapped_dst_nents == 1) {
5820 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5821 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
5822 +       } else {
5823 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5824 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5825 +                                 sizeof(*sg_table));
5826 +       }
5827 +
5828 +       dpaa2_fl_set_len(out_fle, out_len);
5829 +
5830 +       return edesc;
5831 +}
5832 +
5833 +static int tls_set_sh_desc(struct crypto_aead *tls)
5834 +{
5835 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
5836 +       unsigned int ivsize = crypto_aead_ivsize(tls);
5837 +       unsigned int blocksize = crypto_aead_blocksize(tls);
5838 +       struct device *dev = ctx->dev;
5839 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5840 +       struct caam_flc *flc;
5841 +       u32 *desc;
5842 +       unsigned int assoclen = 13; /* always 13 bytes for TLS */
5843 +       unsigned int data_len[2];
5844 +       u32 inl_mask;
5845 +
5846 +       if (!ctx->cdata.keylen || !ctx->authsize)
5847 +               return 0;
5848 +
5849 +       /*
5850 +        * TLS 1.0 encrypt shared descriptor
5851 +        * Job Descriptor and Shared Descriptor
5852 +        * must fit into the 64-word Descriptor h/w Buffer
5853 +        */
5854 +       data_len[0] = ctx->adata.keylen_pad;
5855 +       data_len[1] = ctx->cdata.keylen;
5856 +
5857 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
5858 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
5859 +               return -EINVAL;
5860 +
5861 +       if (inl_mask & 1)
5862 +               ctx->adata.key_virt = ctx->key;
5863 +       else
5864 +               ctx->adata.key_dma = ctx->key_dma;
5865 +
5866 +       if (inl_mask & 2)
5867 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5868 +       else
5869 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5870 +
5871 +       ctx->adata.key_inline = !!(inl_mask & 1);
5872 +       ctx->cdata.key_inline = !!(inl_mask & 2);
5873 +
5874 +       flc = &ctx->flc[ENCRYPT];
5875 +       desc = flc->sh_desc;
5876 +       cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
5877 +                             assoclen, ivsize, ctx->authsize, blocksize,
5878 +                             priv->sec_attr.era);
5879 +       flc->flc[1] = cpu_to_caam32(desc_len(desc));
5880 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5881 +                                  sizeof(flc->flc) + desc_bytes(desc),
5882 +                                  ctx->dir);
5883 +
5884 +       /*
5885 +        * TLS 1.0 decrypt shared descriptor
5886 +        * Keys do not fit inline, regardless of algorithms used
5887 +        */
5888 +       ctx->adata.key_inline = false;
5889 +       ctx->adata.key_dma = ctx->key_dma;
5890 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5891 +
5892 +       flc = &ctx->flc[DECRYPT];
5893 +       desc = flc->sh_desc;
5894 +       cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
5895 +                             ctx->authsize, blocksize, priv->sec_attr.era);
5896 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5897 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5898 +                                  sizeof(flc->flc) + desc_bytes(desc),
5899 +                                  ctx->dir);
5900 +
5901 +       return 0;
5902 +}
5903 +
5904 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
5905 +                     unsigned int keylen)
5906 +{
5907 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
5908 +       struct device *dev = ctx->dev;
5909 +       struct crypto_authenc_keys keys;
5910 +
5911 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5912 +               goto badkey;
5913 +
5914 +#ifdef DEBUG
5915 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5916 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
5917 +               keys.authkeylen);
5918 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5919 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5920 +#endif
5921 +
5922 +       ctx->adata.keylen = keys.authkeylen;
5923 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5924 +                                             OP_ALG_ALGSEL_MASK);
5925 +
5926 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5927 +               goto badkey;
5928 +
5929 +       memcpy(ctx->key, keys.authkey, keys.authkeylen);
5930 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5931 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5932 +                                  keys.enckeylen, ctx->dir);
5933 +#ifdef DEBUG
5934 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5935 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5936 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
5937 +#endif
5938 +
5939 +       ctx->cdata.keylen = keys.enckeylen;
5940 +
5941 +       return tls_set_sh_desc(tls);
5942 +badkey:
5943 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
5944 +       return -EINVAL;
5945 +}
5946 +
5947 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
5948 +{
5949 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
5950 +
5951 +       ctx->authsize = authsize;
5952 +       tls_set_sh_desc(tls);
5953 +
5954 +       return 0;
5955 +}
5956 +
5957 +static int gcm_set_sh_desc(struct crypto_aead *aead)
5958 +{
5959 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
5960 +       struct device *dev = ctx->dev;
5961 +       unsigned int ivsize = crypto_aead_ivsize(aead);
5962 +       struct caam_flc *flc;
5963 +       u32 *desc;
5964 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5965 +                       ctx->cdata.keylen;
5966 +
5967 +       if (!ctx->cdata.keylen || !ctx->authsize)
5968 +               return 0;
5969 +
5970 +       /*
5971 +        * AES GCM encrypt shared descriptor
5972 +        * Job Descriptor and Shared Descriptor
5973 +        * must fit into the 64-word Descriptor h/w Buffer
5974 +        */
5975 +       if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
5976 +               ctx->cdata.key_inline = true;
5977 +               ctx->cdata.key_virt = ctx->key;
5978 +       } else {
5979 +               ctx->cdata.key_inline = false;
5980 +               ctx->cdata.key_dma = ctx->key_dma;
5981 +       }
5982 +
5983 +       flc = &ctx->flc[ENCRYPT];
5984 +       desc = flc->sh_desc;
5985 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5986 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5987 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5988 +                                  sizeof(flc->flc) + desc_bytes(desc),
5989 +                                  ctx->dir);
5990 +
5991 +       /*
5992 +        * Job Descriptor and Shared Descriptors
5993 +        * must all fit into the 64-word Descriptor h/w Buffer
5994 +        */
5995 +       if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
5996 +               ctx->cdata.key_inline = true;
5997 +               ctx->cdata.key_virt = ctx->key;
5998 +       } else {
5999 +               ctx->cdata.key_inline = false;
6000 +               ctx->cdata.key_dma = ctx->key_dma;
6001 +       }
6002 +
6003 +       flc = &ctx->flc[DECRYPT];
6004 +       desc = flc->sh_desc;
6005 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
6006 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6007 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6008 +                                  sizeof(flc->flc) + desc_bytes(desc),
6009 +                                  ctx->dir);
6010 +
6011 +       return 0;
6012 +}
6013 +
6014 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
6015 +{
6016 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6017 +
6018 +       ctx->authsize = authsize;
6019 +       gcm_set_sh_desc(authenc);
6020 +
6021 +       return 0;
6022 +}
6023 +
6024 +static int gcm_setkey(struct crypto_aead *aead,
6025 +                     const u8 *key, unsigned int keylen)
6026 +{
6027 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6028 +       struct device *dev = ctx->dev;
6029 +
6030 +#ifdef DEBUG
6031 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6032 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6033 +#endif
6034 +
6035 +       memcpy(ctx->key, key, keylen);
6036 +       dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
6037 +       ctx->cdata.keylen = keylen;
6038 +
6039 +       return gcm_set_sh_desc(aead);
6040 +}
6041 +
6042 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
6043 +{
6044 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6045 +       struct device *dev = ctx->dev;
6046 +       unsigned int ivsize = crypto_aead_ivsize(aead);
6047 +       struct caam_flc *flc;
6048 +       u32 *desc;
6049 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6050 +                       ctx->cdata.keylen;
6051 +
6052 +       if (!ctx->cdata.keylen || !ctx->authsize)
6053 +               return 0;
6054 +
6055 +       ctx->cdata.key_virt = ctx->key;
6056 +
6057 +       /*
6058 +        * RFC4106 encrypt shared descriptor
6059 +        * Job Descriptor and Shared Descriptor
6060 +        * must fit into the 64-word Descriptor h/w Buffer
6061 +        */
6062 +       if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
6063 +               ctx->cdata.key_inline = true;
6064 +       } else {
6065 +               ctx->cdata.key_inline = false;
6066 +               ctx->cdata.key_dma = ctx->key_dma;
6067 +       }
6068 +
6069 +       flc = &ctx->flc[ENCRYPT];
6070 +       desc = flc->sh_desc;
6071 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6072 +                                 true);
6073 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6074 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6075 +                                  sizeof(flc->flc) + desc_bytes(desc),
6076 +                                  ctx->dir);
6077 +
6078 +       /*
6079 +        * Job Descriptor and Shared Descriptors
6080 +        * must all fit into the 64-word Descriptor h/w Buffer
6081 +        */
6082 +       if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
6083 +               ctx->cdata.key_inline = true;
6084 +       } else {
6085 +               ctx->cdata.key_inline = false;
6086 +               ctx->cdata.key_dma = ctx->key_dma;
6087 +       }
6088 +
6089 +       flc = &ctx->flc[DECRYPT];
6090 +       desc = flc->sh_desc;
6091 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6092 +                                 true);
6093 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6094 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6095 +                                  sizeof(flc->flc) + desc_bytes(desc),
6096 +                                  ctx->dir);
6097 +
6098 +       return 0;
6099 +}
6100 +
6101 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
6102 +                              unsigned int authsize)
6103 +{
6104 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6105 +
6106 +       ctx->authsize = authsize;
6107 +       rfc4106_set_sh_desc(authenc);
6108 +
6109 +       return 0;
6110 +}
6111 +
6112 +static int rfc4106_setkey(struct crypto_aead *aead,
6113 +                         const u8 *key, unsigned int keylen)
6114 +{
6115 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6116 +       struct device *dev = ctx->dev;
6117 +
6118 +       if (keylen < 4)
6119 +               return -EINVAL;
6120 +
6121 +#ifdef DEBUG
6122 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6123 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6124 +#endif
6125 +
6126 +       memcpy(ctx->key, key, keylen);
6127 +       /*
6128 +        * The last four bytes of the key material are used as the salt value
6129 +        * in the nonce. Update the AES key length.
6130 +        */
6131 +       ctx->cdata.keylen = keylen - 4;
6132 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6133 +                                  ctx->dir);
6134 +
6135 +       return rfc4106_set_sh_desc(aead);
6136 +}
6137 +
6138 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
6139 +{
6140 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6141 +       struct device *dev = ctx->dev;
6142 +       unsigned int ivsize = crypto_aead_ivsize(aead);
6143 +       struct caam_flc *flc;
6144 +       u32 *desc;
6145 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6146 +                       ctx->cdata.keylen;
6147 +
6148 +       if (!ctx->cdata.keylen || !ctx->authsize)
6149 +               return 0;
6150 +
6151 +       ctx->cdata.key_virt = ctx->key;
6152 +
6153 +       /*
6154 +        * RFC4543 encrypt shared descriptor
6155 +        * Job Descriptor and Shared Descriptor
6156 +        * must fit into the 64-word Descriptor h/w Buffer
6157 +        */
6158 +       if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
6159 +               ctx->cdata.key_inline = true;
6160 +       } else {
6161 +               ctx->cdata.key_inline = false;
6162 +               ctx->cdata.key_dma = ctx->key_dma;
6163 +       }
6164 +
6165 +       flc = &ctx->flc[ENCRYPT];
6166 +       desc = flc->sh_desc;
6167 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6168 +                                 true);
6169 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6170 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6171 +                                  sizeof(flc->flc) + desc_bytes(desc),
6172 +                                  ctx->dir);
6173 +
6174 +       /*
6175 +        * Job Descriptor and Shared Descriptors
6176 +        * must all fit into the 64-word Descriptor h/w Buffer
6177 +        */
6178 +       if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
6179 +               ctx->cdata.key_inline = true;
6180 +       } else {
6181 +               ctx->cdata.key_inline = false;
6182 +               ctx->cdata.key_dma = ctx->key_dma;
6183 +       }
6184 +
6185 +       flc = &ctx->flc[DECRYPT];
6186 +       desc = flc->sh_desc;
6187 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6188 +                                 true);
6189 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6190 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6191 +                                  sizeof(flc->flc) + desc_bytes(desc),
6192 +                                  ctx->dir);
6193 +
6194 +       return 0;
6195 +}
6196 +
6197 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
6198 +                              unsigned int authsize)
6199 +{
6200 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6201 +
6202 +       ctx->authsize = authsize;
6203 +       rfc4543_set_sh_desc(authenc);
6204 +
6205 +       return 0;
6206 +}
6207 +
6208 +static int rfc4543_setkey(struct crypto_aead *aead,
6209 +                         const u8 *key, unsigned int keylen)
6210 +{
6211 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6212 +       struct device *dev = ctx->dev;
6213 +
6214 +       if (keylen < 4)
6215 +               return -EINVAL;
6216 +
6217 +#ifdef DEBUG
6218 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6219 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6220 +#endif
6221 +
6222 +       memcpy(ctx->key, key, keylen);
6223 +       /*
6224 +        * The last four bytes of the key material are used as the salt value
6225 +        * in the nonce. Update the AES key length.
6226 +        */
6227 +       ctx->cdata.keylen = keylen - 4;
6228 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6229 +                                  ctx->dir);
6230 +
6231 +       return rfc4543_set_sh_desc(aead);
6232 +}
6233 +
6234 +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6235 +                          unsigned int keylen)
6236 +{
6237 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6238 +       struct caam_skcipher_alg *alg =
6239 +               container_of(crypto_skcipher_alg(skcipher),
6240 +                            struct caam_skcipher_alg, skcipher);
6241 +       struct device *dev = ctx->dev;
6242 +       struct caam_flc *flc;
6243 +       unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
6244 +       u32 *desc;
6245 +       u32 ctx1_iv_off = 0;
6246 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
6247 +                              OP_ALG_AAI_CTR_MOD128) &&
6248 +                              ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
6249 +                              OP_ALG_ALGSEL_CHACHA20);
6250 +       const bool is_rfc3686 = alg->caam.rfc3686;
6251 +
6252 +#ifdef DEBUG
6253 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6254 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6255 +#endif
6256 +       /*
6257 +        * AES-CTR needs to load the IV in the CONTEXT1 reg
6258 +        * at an offset of 128 bits (16 bytes):
6259 +        * CONTEXT1[255:128] = IV
6260 +        */
6261 +       if (ctr_mode)
6262 +               ctx1_iv_off = 16;
6263 +
6264 +       /*
6265 +        * RFC3686 specific:
6266 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
6267 +        *      | *key = {KEY, NONCE}
6268 +        */
6269 +       if (is_rfc3686) {
6270 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
6271 +               keylen -= CTR_RFC3686_NONCE_SIZE;
6272 +       }
6273 +
6274 +       ctx->cdata.keylen = keylen;
6275 +       ctx->cdata.key_virt = key;
6276 +       ctx->cdata.key_inline = true;
6277 +
6278 +       /* skcipher_encrypt shared descriptor */
6279 +       flc = &ctx->flc[ENCRYPT];
6280 +       desc = flc->sh_desc;
6281 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
6282 +                                    is_rfc3686, ctx1_iv_off);
6283 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6284 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6285 +                                  sizeof(flc->flc) + desc_bytes(desc),
6286 +                                  ctx->dir);
6287 +
6288 +       /* skcipher_decrypt shared descriptor */
6289 +       flc = &ctx->flc[DECRYPT];
6290 +       desc = flc->sh_desc;
6291 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
6292 +                                    is_rfc3686, ctx1_iv_off);
6293 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6294 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6295 +                                  sizeof(flc->flc) + desc_bytes(desc),
6296 +                                  ctx->dir);
6297 +
6298 +       return 0;
6299 +}
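+
+/*
+ * Worked example for rfc3686(ctr(aes)) with a 16-byte AES key: the
+ * 20-byte setkey input is {KEY[16], NONCE[4]}, so keylen is reduced
+ * to 16 above and the CTR context register ends up as
+ *
+ *   CONTEXT1[255:128] = {NONCE[4], IV[8], COUNTER[4]}
+ *
+ * with ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE = 20, placing the
+ * per-request IV directly after the key-derived nonce.
+ */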
6300 +
6301 +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6302 +                              unsigned int keylen)
6303 +{
6304 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6305 +       struct device *dev = ctx->dev;
6306 +       struct caam_flc *flc;
6307 +       u32 *desc;
6308 +
6309 +       if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
6310 +               dev_err(dev, "key size mismatch\n");
6311 +               crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
6312 +               return -EINVAL;
6313 +       }
6314 +
6315 +       ctx->cdata.keylen = keylen;
6316 +       ctx->cdata.key_virt = key;
6317 +       ctx->cdata.key_inline = true;
6318 +
6319 +       /* xts_skcipher_encrypt shared descriptor */
6320 +       flc = &ctx->flc[ENCRYPT];
6321 +       desc = flc->sh_desc;
6322 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
6323 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6324 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6325 +                                  sizeof(flc->flc) + desc_bytes(desc),
6326 +                                  ctx->dir);
6327 +
6328 +       /* xts_skcipher_decrypt shared descriptor */
6329 +       flc = &ctx->flc[DECRYPT];
6330 +       desc = flc->sh_desc;
6331 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
6332 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6333 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6334 +                                  sizeof(flc->flc) + desc_bytes(desc),
6335 +                                  ctx->dir);
6336 +
6337 +       return 0;
6338 +}
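+
+/*
+ * XTS consumes two equally-sized AES keys (the data key and the tweak
+ * key), so the only keylen values accepted above are 32 bytes
+ * (2 x AES-128) and 64 bytes (2 x AES-256).
+ */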
6339 +
6340 +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
6341 +{
6342 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6343 +       struct caam_request *req_ctx = skcipher_request_ctx(req);
6344 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
6345 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
6346 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6347 +       struct device *dev = ctx->dev;
6348 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
6349 +                      GFP_KERNEL : GFP_ATOMIC;
6350 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
6351 +       struct skcipher_edesc *edesc;
6352 +       dma_addr_t iv_dma;
6353 +       u8 *iv;
6354 +       int ivsize = crypto_skcipher_ivsize(skcipher);
6355 +       int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
6356 +       struct dpaa2_sg_entry *sg_table;
6357 +
6358 +       src_nents = sg_nents_for_len(req->src, req->cryptlen);
6359 +       if (unlikely(src_nents < 0)) {
6360 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
6361 +                       req->cryptlen);
6362 +               return ERR_PTR(src_nents);
6363 +       }
6364 +
6365 +       if (unlikely(req->dst != req->src)) {
6366 +               dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
6367 +               if (unlikely(dst_nents < 0)) {
6368 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
6369 +                               req->cryptlen);
6370 +                       return ERR_PTR(dst_nents);
6371 +               }
6372 +
6373 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6374 +                                             DMA_TO_DEVICE);
6375 +               if (unlikely(!mapped_src_nents)) {
6376 +                       dev_err(dev, "unable to map source\n");
6377 +                       return ERR_PTR(-ENOMEM);
6378 +               }
6379 +
6380 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
6381 +                                             DMA_FROM_DEVICE);
6382 +               if (unlikely(!mapped_dst_nents)) {
6383 +                       dev_err(dev, "unable to map destination\n");
6384 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
6385 +                       return ERR_PTR(-ENOMEM);
6386 +               }
6387 +       } else {
6388 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6389 +                                             DMA_BIDIRECTIONAL);
6390 +               if (unlikely(!mapped_src_nents)) {
6391 +                       dev_err(dev, "unable to map source\n");
6392 +                       return ERR_PTR(-ENOMEM);
6393 +               }
6394 +       }
6395 +
6396 +       qm_sg_ents = 1 + mapped_src_nents;
6397 +       dst_sg_idx = qm_sg_ents;
6398 +
6399 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
6400 +       qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
6401 +       if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
6402 +                    ivsize > CAAM_QI_MEMCACHE_SIZE)) {
6403 +               dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
6404 +                       qm_sg_ents, ivsize);
6405 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6406 +                          0, 0, 0);
6407 +               return ERR_PTR(-ENOMEM);
6408 +       }
6409 +
6410 +       /* allocate space for base edesc, link tables and IV */
6411 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
6412 +       if (unlikely(!edesc)) {
6413 +               dev_err(dev, "could not allocate extended descriptor\n");
6414 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6415 +                          0, 0, 0);
6416 +               return ERR_PTR(-ENOMEM);
6417 +       }
6418 +
6419 +       /* Make sure IV is located in a DMAable area */
6420 +       sg_table = &edesc->sgt[0];
6421 +       iv = (u8 *)(sg_table + qm_sg_ents);
6422 +       memcpy(iv, req->iv, ivsize);
6423 +
6424 +       iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
6425 +       if (dma_mapping_error(dev, iv_dma)) {
6426 +               dev_err(dev, "unable to map IV\n");
6427 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6428 +                          0, 0, 0);
6429 +               qi_cache_free(edesc);
6430 +               return ERR_PTR(-ENOMEM);
6431 +       }
6432 +
6433 +       edesc->src_nents = src_nents;
6434 +       edesc->dst_nents = dst_nents;
6435 +       edesc->iv_dma = iv_dma;
6436 +       edesc->qm_sg_bytes = qm_sg_bytes;
6437 +
6438 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
6439 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
6440 +
6441 +       if (mapped_dst_nents > 1)
6442 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
6443 +                                dst_sg_idx, 0);
6444 +
6445 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
6446 +                                         DMA_TO_DEVICE);
6447 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
6448 +               dev_err(dev, "unable to map S/G table\n");
6449 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
6450 +                          iv_dma, ivsize, 0, 0);
6451 +               qi_cache_free(edesc);
6452 +               return ERR_PTR(-ENOMEM);
6453 +       }
6454 +
6455 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
6456 +       dpaa2_fl_set_final(in_fle, true);
6457 +       dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
6458 +       dpaa2_fl_set_len(out_fle, req->cryptlen);
6459 +
6460 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
6461 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
6462 +
6463 +       if (req->src == req->dst) {
6464 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6465 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
6466 +                                 sizeof(*sg_table));
6467 +       } else if (mapped_dst_nents > 1) {
6468 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6469 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
6470 +                                 sizeof(*sg_table));
6471 +       } else {
6472 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
6473 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
6474 +       }
6475 +
6476 +       return edesc;
6477 +}
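+
+/*
+ * Sketch of the qDMA S/G table built above:
+ *
+ *   sg_table[0]            - the (bounced) IV
+ *   sg_table[1..]          - source buffer entries, last one marked final
+ *   sg_table[dst_sg_idx..] - destination entries, present only for
+ *                            out-of-place requests with a scattered dst
+ *
+ * in_fle always points at sg_table[0]; for in-place requests out_fle
+ * reuses the same table starting at sg_table[1], skipping the IV.
+ */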
6478 +
6479 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
6480 +                      struct aead_request *req)
6481 +{
6482 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
6483 +       int ivsize = crypto_aead_ivsize(aead);
6484 +
6485 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6486 +                  edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6487 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
6488 +}
6489 +
6490 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
6491 +                     struct aead_request *req)
6492 +{
6493 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6494 +       int ivsize = crypto_aead_ivsize(tls);
6495 +
6496 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
6497 +                  edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
6498 +                  edesc->qm_sg_bytes);
6499 +}
6500 +
6501 +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
6502 +                          struct skcipher_request *req)
6503 +{
6504 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6505 +       int ivsize = crypto_skcipher_ivsize(skcipher);
6506 +
6507 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6508 +                  edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6509 +}
6510 +
6511 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
6512 +{
6513 +       struct crypto_async_request *areq = cbk_ctx;
6514 +       struct aead_request *req = container_of(areq, struct aead_request,
6515 +                                               base);
6516 +       struct caam_request *req_ctx = to_caam_req(areq);
6517 +       struct aead_edesc *edesc = req_ctx->edesc;
6518 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
6519 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6520 +       int ecode = 0;
6521 +
6522 +#ifdef DEBUG
6523 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6524 +#endif
6525 +
6526 +       if (unlikely(status)) {
6527 +               caam_qi2_strstatus(ctx->dev, status);
6528 +               ecode = -EIO;
6529 +       }
6530 +
6531 +       aead_unmap(ctx->dev, edesc, req);
6532 +       qi_cache_free(edesc);
6533 +       aead_request_complete(req, ecode);
6534 +}
6535 +
6536 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
6537 +{
6538 +       struct crypto_async_request *areq = cbk_ctx;
6539 +       struct aead_request *req = container_of(areq, struct aead_request,
6540 +                                               base);
6541 +       struct caam_request *req_ctx = to_caam_req(areq);
6542 +       struct aead_edesc *edesc = req_ctx->edesc;
6543 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
6544 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6545 +       int ecode = 0;
6546 +
6547 +#ifdef DEBUG
6548 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6549 +#endif
6550 +
6551 +       if (unlikely(status)) {
6552 +               caam_qi2_strstatus(ctx->dev, status);
6553 +               /*
6554 +                * verify that the hardware's ICV check passed, else return -EBADMSG
6555 +                */
6556 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6557 +                    JRSTA_CCBERR_ERRID_ICVCHK)
6558 +                       ecode = -EBADMSG;
6559 +               else
6560 +                       ecode = -EIO;
6561 +       }
6562 +
6563 +       aead_unmap(ctx->dev, edesc, req);
6564 +       qi_cache_free(edesc);
6565 +       aead_request_complete(req, ecode);
6566 +}
6567 +
6568 +static int aead_encrypt(struct aead_request *req)
6569 +{
6570 +       struct aead_edesc *edesc;
6571 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
6572 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6573 +       struct caam_request *caam_req = aead_request_ctx(req);
6574 +       int ret;
6575 +
6576 +       /* allocate extended descriptor */
6577 +       edesc = aead_edesc_alloc(req, true);
6578 +       if (IS_ERR(edesc))
6579 +               return PTR_ERR(edesc);
6580 +
6581 +       caam_req->flc = &ctx->flc[ENCRYPT];
6582 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6583 +       caam_req->cbk = aead_encrypt_done;
6584 +       caam_req->ctx = &req->base;
6585 +       caam_req->edesc = edesc;
6586 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6587 +       if (ret != -EINPROGRESS &&
6588 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6589 +               aead_unmap(ctx->dev, edesc, req);
6590 +               qi_cache_free(edesc);
6591 +       }
6592 +
6593 +       return ret;
6594 +}
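+
+/*
+ * Note on the enqueue error handling above, which the remaining
+ * encrypt/decrypt entry points repeat: under the kernel's async crypto
+ * convention -EINPROGRESS means the request was queued and the done
+ * callback owns its cleanup, while -EBUSY together with
+ * CRYPTO_TFM_REQ_MAY_BACKLOG means a backlogged request that will
+ * likewise complete later. Only for other return codes is the extended
+ * descriptor unmapped and freed synchronously here.
+ */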
6595 +
6596 +static int aead_decrypt(struct aead_request *req)
6597 +{
6598 +       struct aead_edesc *edesc;
6599 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
6600 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
6601 +       struct caam_request *caam_req = aead_request_ctx(req);
6602 +       int ret;
6603 +
6604 +       /* allocate extended descriptor */
6605 +       edesc = aead_edesc_alloc(req, false);
6606 +       if (IS_ERR(edesc))
6607 +               return PTR_ERR(edesc);
6608 +
6609 +       caam_req->flc = &ctx->flc[DECRYPT];
6610 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6611 +       caam_req->cbk = aead_decrypt_done;
6612 +       caam_req->ctx = &req->base;
6613 +       caam_req->edesc = edesc;
6614 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6615 +       if (ret != -EINPROGRESS &&
6616 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6617 +               aead_unmap(ctx->dev, edesc, req);
6618 +               qi_cache_free(edesc);
6619 +       }
6620 +
6621 +       return ret;
6622 +}
6623 +
6624 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
6625 +{
6626 +       struct crypto_async_request *areq = cbk_ctx;
6627 +       struct aead_request *req = container_of(areq, struct aead_request,
6628 +                                               base);
6629 +       struct caam_request *req_ctx = to_caam_req(areq);
6630 +       struct tls_edesc *edesc = req_ctx->edesc;
6631 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6632 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
6633 +       int ecode = 0;
6634 +
6635 +#ifdef DEBUG
6636 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6637 +#endif
6638 +
6639 +       if (unlikely(status)) {
6640 +               caam_qi2_strstatus(ctx->dev, status);
6641 +               ecode = -EIO;
6642 +       }
6643 +
6644 +       tls_unmap(ctx->dev, edesc, req);
6645 +       qi_cache_free(edesc);
6646 +       aead_request_complete(req, ecode);
6647 +}
6648 +
6649 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
6650 +{
6651 +       struct crypto_async_request *areq = cbk_ctx;
6652 +       struct aead_request *req = container_of(areq, struct aead_request,
6653 +                                               base);
6654 +       struct caam_request *req_ctx = to_caam_req(areq);
6655 +       struct tls_edesc *edesc = req_ctx->edesc;
6656 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6657 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
6658 +       int ecode = 0;
6659 +
6660 +#ifdef DEBUG
6661 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6662 +#endif
6663 +
6664 +       if (unlikely(status)) {
6665 +               caam_qi2_strstatus(ctx->dev, status);
6666 +               /*
6667 +                * verify that the hardware's ICV check passed, else return -EBADMSG
6668 +                */
6669 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6670 +                    JRSTA_CCBERR_ERRID_ICVCHK)
6671 +                       ecode = -EBADMSG;
6672 +               else
6673 +                       ecode = -EIO;
6674 +       }
6675 +
6676 +       tls_unmap(ctx->dev, edesc, req);
6677 +       qi_cache_free(edesc);
6678 +       aead_request_complete(req, ecode);
6679 +}
6680 +
6681 +static int tls_encrypt(struct aead_request *req)
6682 +{
6683 +       struct tls_edesc *edesc;
6684 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6685 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
6686 +       struct caam_request *caam_req = aead_request_ctx(req);
6687 +       int ret;
6688 +
6689 +       /* allocate extended descriptor */
6690 +       edesc = tls_edesc_alloc(req, true);
6691 +       if (IS_ERR(edesc))
6692 +               return PTR_ERR(edesc);
6693 +
6694 +       caam_req->flc = &ctx->flc[ENCRYPT];
6695 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6696 +       caam_req->cbk = tls_encrypt_done;
6697 +       caam_req->ctx = &req->base;
6698 +       caam_req->edesc = edesc;
6699 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6700 +       if (ret != -EINPROGRESS &&
6701 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6702 +               tls_unmap(ctx->dev, edesc, req);
6703 +               qi_cache_free(edesc);
6704 +       }
6705 +
6706 +       return ret;
6707 +}
6708 +
6709 +static int tls_decrypt(struct aead_request *req)
6710 +{
6711 +       struct tls_edesc *edesc;
6712 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6713 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
6714 +       struct caam_request *caam_req = aead_request_ctx(req);
6715 +       int ret;
6716 +
6717 +       /* allocate extended descriptor */
6718 +       edesc = tls_edesc_alloc(req, false);
6719 +       if (IS_ERR(edesc))
6720 +               return PTR_ERR(edesc);
6721 +
6722 +       caam_req->flc = &ctx->flc[DECRYPT];
6723 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6724 +       caam_req->cbk = tls_decrypt_done;
6725 +       caam_req->ctx = &req->base;
6726 +       caam_req->edesc = edesc;
6727 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6728 +       if (ret != -EINPROGRESS &&
6729 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6730 +               tls_unmap(ctx->dev, edesc, req);
6731 +               qi_cache_free(edesc);
6732 +       }
6733 +
6734 +       return ret;
6735 +}
6736 +
6737 +static int ipsec_gcm_encrypt(struct aead_request *req)
6738 +{
6739 +       if (req->assoclen < 8)
6740 +               return -EINVAL;
6741 +
6742 +       return aead_encrypt(req);
6743 +}
6744 +
6745 +static int ipsec_gcm_decrypt(struct aead_request *req)
6746 +{
6747 +       if (req->assoclen < 8)
6748 +               return -EINVAL;
6749 +
6750 +       return aead_decrypt(req);
6751 +}
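+
+/*
+ * The assoclen lower bound mirrors IPsec ESP usage: for rfc4106 and
+ * rfc4543 the associated data begins with the 4-byte SPI followed by
+ * at least a 4-byte sequence number, so fewer than 8 bytes cannot be a
+ * well-formed ESP header.
+ */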
6752 +
6753 +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
6754 +{
6755 +       struct crypto_async_request *areq = cbk_ctx;
6756 +       struct skcipher_request *req = skcipher_request_cast(areq);
6757 +       struct caam_request *req_ctx = to_caam_req(areq);
6758 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6759 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6760 +       struct skcipher_edesc *edesc = req_ctx->edesc;
6761 +       int ecode = 0;
6762 +       int ivsize = crypto_skcipher_ivsize(skcipher);
6763 +
6764 +#ifdef DEBUG
6765 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6766 +#endif
6767 +
6768 +       if (unlikely(status)) {
6769 +               caam_qi2_strstatus(ctx->dev, status);
6770 +               ecode = -EIO;
6771 +       }
6772 +
6773 +#ifdef DEBUG
6774 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
6775 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6776 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
6777 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
6778 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6779 +                    edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6780 +#endif
6781 +
6782 +       skcipher_unmap(ctx->dev, edesc, req);
6783 +
6784 +       /*
6785 +        * The crypto API expects us to set the IV (req->iv) to the last
6786 +        * ciphertext block. This is used e.g. by the CTS mode.
6787 +        */
6788 +       scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
6789 +                                ivsize, 0);
6790 +
6791 +       qi_cache_free(edesc);
6792 +       skcipher_request_complete(req, ecode);
6793 +}
6794 +
6795 +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
6796 +{
6797 +       struct crypto_async_request *areq = cbk_ctx;
6798 +       struct skcipher_request *req = skcipher_request_cast(areq);
6799 +       struct caam_request *req_ctx = to_caam_req(areq);
6800 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6801 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6802 +       struct skcipher_edesc *edesc = req_ctx->edesc;
6803 +       int ecode = 0;
6804 +#ifdef DEBUG
6805 +       int ivsize = crypto_skcipher_ivsize(skcipher);
6806 +
6807 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6808 +#endif
6809 +
6810 +       if (unlikely(status)) {
6811 +               caam_qi2_strstatus(ctx->dev, status);
6812 +               ecode = -EIO;
6813 +       }
6814 +
6815 +#ifdef DEBUG
6816 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
6817 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6818 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
6819 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
6820 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6821 +                    edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6822 +#endif
6823 +
6824 +       skcipher_unmap(ctx->dev, edesc, req);
6825 +       qi_cache_free(edesc);
6826 +       skcipher_request_complete(req, ecode);
6827 +}
6828 +
6829 +static int skcipher_encrypt(struct skcipher_request *req)
6830 +{
6831 +       struct skcipher_edesc *edesc;
6832 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6833 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6834 +       struct caam_request *caam_req = skcipher_request_ctx(req);
6835 +       int ret;
6836 +
6837 +       /* allocate extended descriptor */
6838 +       edesc = skcipher_edesc_alloc(req);
6839 +       if (IS_ERR(edesc))
6840 +               return PTR_ERR(edesc);
6841 +
6842 +       caam_req->flc = &ctx->flc[ENCRYPT];
6843 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6844 +       caam_req->cbk = skcipher_encrypt_done;
6845 +       caam_req->ctx = &req->base;
6846 +       caam_req->edesc = edesc;
6847 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6848 +       if (ret != -EINPROGRESS &&
6849 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6850 +               skcipher_unmap(ctx->dev, edesc, req);
6851 +               qi_cache_free(edesc);
6852 +       }
6853 +
6854 +       return ret;
6855 +}
6856 +
6857 +static int skcipher_decrypt(struct skcipher_request *req)
6858 +{
6859 +       struct skcipher_edesc *edesc;
6860 +       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6861 +       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6862 +       struct caam_request *caam_req = skcipher_request_ctx(req);
6863 +       int ivsize = crypto_skcipher_ivsize(skcipher);
6864 +       int ret;
6865 +
6866 +       /* allocate extended descriptor */
6867 +       edesc = skcipher_edesc_alloc(req);
6868 +       if (IS_ERR(edesc))
6869 +               return PTR_ERR(edesc);
6870 +
6871 +       /*
6872 +        * The crypto API expects us to set the IV (req->iv) to the last
6873 +        * ciphertext block.
6874 +        */
6875 +       scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
6876 +                                ivsize, 0);
6877 +
6878 +       caam_req->flc = &ctx->flc[DECRYPT];
6879 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6880 +       caam_req->cbk = skcipher_decrypt_done;
6881 +       caam_req->ctx = &req->base;
6882 +       caam_req->edesc = edesc;
6883 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6884 +       if (ret != -EINPROGRESS &&
6885 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6886 +               skcipher_unmap(ctx->dev, edesc, req);
6887 +               qi_cache_free(edesc);
6888 +       }
6889 +
6890 +       return ret;
6891 +}
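+
+/*
+ * IV chaining works differently in the two directions above:
+ * encryption copies the last ciphertext block out of req->dst in
+ * skcipher_encrypt_done(), once the hardware has produced it, while
+ * decryption must save that block from req->src *before* enqueueing,
+ * since an in-place operation would overwrite it with plaintext.
+ */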
6892 +
6893 +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
6894 +                        bool uses_dkp)
6895 +{
6896 +       dma_addr_t dma_addr;
6897 +       int i;
6898 +
6899 +       /* copy descriptor header template value */
6900 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
6901 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
6902 +
6903 +       ctx->dev = caam->dev;
6904 +       ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
6905 +
6906 +       dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
6907 +                                       offsetof(struct caam_ctx, flc_dma),
6908 +                                       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
6909 +       if (dma_mapping_error(ctx->dev, dma_addr)) {
6910 +               dev_err(ctx->dev, "unable to map key, shared descriptors\n");
6911 +               return -ENOMEM;
6912 +       }
6913 +
6914 +       for (i = 0; i < NUM_OP; i++)
6915 +               ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
6916 +       ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
6917 +
6918 +       return 0;
6919 +}
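+
+/*
+ * The single dma_map_single_attrs() call above relies on the layout of
+ * struct caam_ctx: it maps everything up to the flc_dma member, i.e.
+ * the flc[NUM_OP] shared-descriptor array immediately followed by the
+ * key buffer, so flc_dma[i] and key_dma are fixed offsets into one
+ * mapping instead of separate mappings.
+ */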
6920 +
6921 +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
6922 +{
6923 +       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
6924 +       struct caam_skcipher_alg *caam_alg =
6925 +               container_of(alg, typeof(*caam_alg), skcipher);
6926 +
6927 +       crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
6928 +       return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
6929 +}
6930 +
6931 +static int caam_cra_init_aead(struct crypto_aead *tfm)
6932 +{
6933 +       struct aead_alg *alg = crypto_aead_alg(tfm);
6934 +       struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
6935 +                                                     aead);
6936 +
6937 +       crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
6938 +       return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
6939 +                            (alg->setkey == aead_setkey) ||
6940 +                            (alg->setkey == tls_setkey));
6941 +}
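+
+/*
+ * Transforms whose setkey runs the CAAM's Derived Key Protocol (DKP)
+ * have the hardware write the derived split key back into ctx->key, so
+ * caam_cra_init() maps their context DMA_BIDIRECTIONAL instead of
+ * DMA_TO_DEVICE.
+ */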
6942 +
6943 +static void caam_exit_common(struct caam_ctx *ctx)
6944 +{
6945 +       dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
6946 +                              offsetof(struct caam_ctx, flc_dma), ctx->dir,
6947 +                              DMA_ATTR_SKIP_CPU_SYNC);
6948 +}
6949 +
6950 +static void caam_cra_exit(struct crypto_skcipher *tfm)
6951 +{
6952 +       caam_exit_common(crypto_skcipher_ctx(tfm));
6953 +}
6954 +
6955 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
6956 +{
6957 +       caam_exit_common(crypto_aead_ctx(tfm));
6958 +}
6959 +
6960 +static struct caam_skcipher_alg driver_algs[] = {
6961 +       {
6962 +               .skcipher = {
6963 +                       .base = {
6964 +                               .cra_name = "cbc(aes)",
6965 +                               .cra_driver_name = "cbc-aes-caam-qi2",
6966 +                               .cra_blocksize = AES_BLOCK_SIZE,
6967 +                       },
6968 +                       .setkey = skcipher_setkey,
6969 +                       .encrypt = skcipher_encrypt,
6970 +                       .decrypt = skcipher_decrypt,
6971 +                       .min_keysize = AES_MIN_KEY_SIZE,
6972 +                       .max_keysize = AES_MAX_KEY_SIZE,
6973 +                       .ivsize = AES_BLOCK_SIZE,
6974 +               },
6975 +               .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6976 +       },
6977 +       {
6978 +               .skcipher = {
6979 +                       .base = {
6980 +                               .cra_name = "cbc(des3_ede)",
6981 +                               .cra_driver_name = "cbc-3des-caam-qi2",
6982 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6983 +                       },
6984 +                       .setkey = skcipher_setkey,
6985 +                       .encrypt = skcipher_encrypt,
6986 +                       .decrypt = skcipher_decrypt,
6987 +                       .min_keysize = DES3_EDE_KEY_SIZE,
6988 +                       .max_keysize = DES3_EDE_KEY_SIZE,
6989 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
6990 +               },
6991 +               .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6992 +       },
6993 +       {
6994 +               .skcipher = {
6995 +                       .base = {
6996 +                               .cra_name = "cbc(des)",
6997 +                               .cra_driver_name = "cbc-des-caam-qi2",
6998 +                               .cra_blocksize = DES_BLOCK_SIZE,
6999 +                       },
7000 +                       .setkey = skcipher_setkey,
7001 +                       .encrypt = skcipher_encrypt,
7002 +                       .decrypt = skcipher_decrypt,
7003 +                       .min_keysize = DES_KEY_SIZE,
7004 +                       .max_keysize = DES_KEY_SIZE,
7005 +                       .ivsize = DES_BLOCK_SIZE,
7006 +               },
7007 +               .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7008 +       },
7009 +       {
7010 +               .skcipher = {
7011 +                       .base = {
7012 +                               .cra_name = "ctr(aes)",
7013 +                               .cra_driver_name = "ctr-aes-caam-qi2",
7014 +                               .cra_blocksize = 1,
7015 +                       },
7016 +                       .setkey = skcipher_setkey,
7017 +                       .encrypt = skcipher_encrypt,
7018 +                       .decrypt = skcipher_decrypt,
7019 +                       .min_keysize = AES_MIN_KEY_SIZE,
7020 +                       .max_keysize = AES_MAX_KEY_SIZE,
7021 +                       .ivsize = AES_BLOCK_SIZE,
7022 +                       .chunksize = AES_BLOCK_SIZE,
7023 +               },
7024 +               .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
7025 +                                       OP_ALG_AAI_CTR_MOD128,
7026 +       },
7027 +       {
7028 +               .skcipher = {
7029 +                       .base = {
7030 +                               .cra_name = "rfc3686(ctr(aes))",
7031 +                               .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
7032 +                               .cra_blocksize = 1,
7033 +                       },
7034 +                       .setkey = skcipher_setkey,
7035 +                       .encrypt = skcipher_encrypt,
7036 +                       .decrypt = skcipher_decrypt,
7037 +                       .min_keysize = AES_MIN_KEY_SIZE +
7038 +                                      CTR_RFC3686_NONCE_SIZE,
7039 +                       .max_keysize = AES_MAX_KEY_SIZE +
7040 +                                      CTR_RFC3686_NONCE_SIZE,
7041 +                       .ivsize = CTR_RFC3686_IV_SIZE,
7042 +                       .chunksize = AES_BLOCK_SIZE,
7043 +               },
7044 +               .caam = {
7045 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
7046 +                                          OP_ALG_AAI_CTR_MOD128,
7047 +                       .rfc3686 = true,
7048 +               },
7049 +       },
7050 +       {
7051 +               .skcipher = {
7052 +                       .base = {
7053 +                               .cra_name = "xts(aes)",
7054 +                               .cra_driver_name = "xts-aes-caam-qi2",
7055 +                               .cra_blocksize = AES_BLOCK_SIZE,
7056 +                       },
7057 +                       .setkey = xts_skcipher_setkey,
7058 +                       .encrypt = skcipher_encrypt,
7059 +                       .decrypt = skcipher_decrypt,
7060 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
7061 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
7062 +                       .ivsize = AES_BLOCK_SIZE,
7063 +               },
7064 +               .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
7065 +       },
7066 +       {
7067 +               .skcipher = {
7068 +                       .base = {
7069 +                               .cra_name = "chacha20",
7070 +                               .cra_driver_name = "chacha20-caam-qi2",
7071 +                               .cra_blocksize = 1,
7072 +                       },
7073 +                       .setkey = skcipher_setkey,
7074 +                       .encrypt = skcipher_encrypt,
7075 +                       .decrypt = skcipher_decrypt,
7076 +                       .min_keysize = CHACHA20_KEY_SIZE,
7077 +                       .max_keysize = CHACHA20_KEY_SIZE,
7078 +                       .ivsize = CHACHA20_IV_SIZE,
7079 +               },
7080 +               .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
7081 +       },
7082 +};
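+
+/*
+ * Usage sketch (generic kernel crypto API, not part of this driver):
+ * once the algorithms above are registered, callers bind to them by
+ * generic name, e.g.
+ *
+ *   struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *
+ *   if (!IS_ERR(tfm)) {
+ *           crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ *           ...
+ *           crypto_free_skcipher(tfm);
+ *   }
+ *
+ * which resolves to "cbc-aes-caam-qi2" whenever this driver provides
+ * the highest-priority cbc(aes) implementation.
+ */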
7083 +
7084 +static struct caam_aead_alg driver_aeads[] = {
7085 +       {
7086 +               .aead = {
7087 +                       .base = {
7088 +                               .cra_name = "rfc4106(gcm(aes))",
7089 +                               .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
7090 +                               .cra_blocksize = 1,
7091 +                       },
7092 +                       .setkey = rfc4106_setkey,
7093 +                       .setauthsize = rfc4106_setauthsize,
7094 +                       .encrypt = ipsec_gcm_encrypt,
7095 +                       .decrypt = ipsec_gcm_decrypt,
7096 +                       .ivsize = 8,
7097 +                       .maxauthsize = AES_BLOCK_SIZE,
7098 +               },
7099 +               .caam = {
7100 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7101 +               },
7102 +       },
7103 +       {
7104 +               .aead = {
7105 +                       .base = {
7106 +                               .cra_name = "rfc4543(gcm(aes))",
7107 +                               .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
7108 +                               .cra_blocksize = 1,
7109 +                       },
7110 +                       .setkey = rfc4543_setkey,
7111 +                       .setauthsize = rfc4543_setauthsize,
7112 +                       .encrypt = ipsec_gcm_encrypt,
7113 +                       .decrypt = ipsec_gcm_decrypt,
7114 +                       .ivsize = 8,
7115 +                       .maxauthsize = AES_BLOCK_SIZE,
7116 +               },
7117 +               .caam = {
7118 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7119 +               },
7120 +       },
7121 +       /* Galois Counter Mode */
7122 +       {
7123 +               .aead = {
7124 +                       .base = {
7125 +                               .cra_name = "gcm(aes)",
7126 +                               .cra_driver_name = "gcm-aes-caam-qi2",
7127 +                               .cra_blocksize = 1,
7128 +                       },
7129 +                       .setkey = gcm_setkey,
7130 +                       .setauthsize = gcm_setauthsize,
7131 +                       .encrypt = aead_encrypt,
7132 +                       .decrypt = aead_decrypt,
7133 +                       .ivsize = 12,
7134 +                       .maxauthsize = AES_BLOCK_SIZE,
7135 +               },
7136 +               .caam = {
7137 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7138 +               }
7139 +       },
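+       /*
+        * Note the ivsize split: the gcm(aes) entry above takes the
+        * full 12-byte nonce with each request, while the rfc4106 and
+        * rfc4543 entries before it take only the 8-byte explicit IV
+        * and rebuild the nonce from the salt stored at setkey time.
+        */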
7140 +       /* single-pass ipsec_esp descriptor */
7141 +       {
7142 +               .aead = {
7143 +                       .base = {
7144 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
7145 +                               .cra_driver_name = "authenc-hmac-md5-"
7146 +                                                  "cbc-aes-caam-qi2",
7147 +                               .cra_blocksize = AES_BLOCK_SIZE,
7148 +                       },
7149 +                       .setkey = aead_setkey,
7150 +                       .setauthsize = aead_setauthsize,
7151 +                       .encrypt = aead_encrypt,
7152 +                       .decrypt = aead_decrypt,
7153 +                       .ivsize = AES_BLOCK_SIZE,
7154 +                       .maxauthsize = MD5_DIGEST_SIZE,
7155 +               },
7156 +               .caam = {
7157 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7158 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7159 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7160 +               }
7161 +       },
7162 +       {
7163 +               .aead = {
7164 +                       .base = {
7165 +                               .cra_name = "echainiv(authenc(hmac(md5),"
7166 +                                           "cbc(aes)))",
7167 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
7168 +                                                  "cbc-aes-caam-qi2",
7169 +                               .cra_blocksize = AES_BLOCK_SIZE,
7170 +                       },
7171 +                       .setkey = aead_setkey,
7172 +                       .setauthsize = aead_setauthsize,
7173 +                       .encrypt = aead_encrypt,
7174 +                       .decrypt = aead_decrypt,
7175 +                       .ivsize = AES_BLOCK_SIZE,
7176 +                       .maxauthsize = MD5_DIGEST_SIZE,
7177 +               },
7178 +               .caam = {
7179 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7180 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7181 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7182 +                       .geniv = true,
7183 +               }
7184 +       },
7185 +       {
7186 +               .aead = {
7187 +                       .base = {
7188 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
7189 +                               .cra_driver_name = "authenc-hmac-sha1-"
7190 +                                                  "cbc-aes-caam-qi2",
7191 +                               .cra_blocksize = AES_BLOCK_SIZE,
7192 +                       },
7193 +                       .setkey = aead_setkey,
7194 +                       .setauthsize = aead_setauthsize,
7195 +                       .encrypt = aead_encrypt,
7196 +                       .decrypt = aead_decrypt,
7197 +                       .ivsize = AES_BLOCK_SIZE,
7198 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7199 +               },
7200 +               .caam = {
7201 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7202 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7203 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7204 +               }
7205 +       },
7206 +       {
7207 +               .aead = {
7208 +                       .base = {
7209 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
7210 +                                           "cbc(aes)))",
7211 +                               .cra_driver_name = "echainiv-authenc-"
7212 +                                                  "hmac-sha1-cbc-aes-caam-qi2",
7213 +                               .cra_blocksize = AES_BLOCK_SIZE,
7214 +                       },
7215 +                       .setkey = aead_setkey,
7216 +                       .setauthsize = aead_setauthsize,
7217 +                       .encrypt = aead_encrypt,
7218 +                       .decrypt = aead_decrypt,
7219 +                       .ivsize = AES_BLOCK_SIZE,
7220 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7221 +               },
7222 +               .caam = {
7223 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7224 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7225 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7226 +                       .geniv = true,
7227 +               },
7228 +       },
7229 +       {
7230 +               .aead = {
7231 +                       .base = {
7232 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
7233 +                               .cra_driver_name = "authenc-hmac-sha224-"
7234 +                                                  "cbc-aes-caam-qi2",
7235 +                               .cra_blocksize = AES_BLOCK_SIZE,
7236 +                       },
7237 +                       .setkey = aead_setkey,
7238 +                       .setauthsize = aead_setauthsize,
7239 +                       .encrypt = aead_encrypt,
7240 +                       .decrypt = aead_decrypt,
7241 +                       .ivsize = AES_BLOCK_SIZE,
7242 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7243 +               },
7244 +               .caam = {
7245 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7246 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7247 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7248 +               }
7249 +       },
7250 +       {
7251 +               .aead = {
7252 +                       .base = {
7253 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
7254 +                                           "cbc(aes)))",
7255 +                               .cra_driver_name = "echainiv-authenc-"
7256 +                                                  "hmac-sha224-cbc-aes-caam-qi2",
7257 +                               .cra_blocksize = AES_BLOCK_SIZE,
7258 +                       },
7259 +                       .setkey = aead_setkey,
7260 +                       .setauthsize = aead_setauthsize,
7261 +                       .encrypt = aead_encrypt,
7262 +                       .decrypt = aead_decrypt,
7263 +                       .ivsize = AES_BLOCK_SIZE,
7264 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7265 +               },
7266 +               .caam = {
7267 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7268 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7269 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7270 +                       .geniv = true,
7271 +               }
7272 +       },
7273 +       {
7274 +               .aead = {
7275 +                       .base = {
7276 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
7277 +                               .cra_driver_name = "authenc-hmac-sha256-"
7278 +                                                  "cbc-aes-caam-qi2",
7279 +                               .cra_blocksize = AES_BLOCK_SIZE,
7280 +                       },
7281 +                       .setkey = aead_setkey,
7282 +                       .setauthsize = aead_setauthsize,
7283 +                       .encrypt = aead_encrypt,
7284 +                       .decrypt = aead_decrypt,
7285 +                       .ivsize = AES_BLOCK_SIZE,
7286 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7287 +               },
7288 +               .caam = {
7289 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7290 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7291 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7292 +               }
7293 +       },
7294 +       {
7295 +               .aead = {
7296 +                       .base = {
7297 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
7298 +                                           "cbc(aes)))",
7299 +                               .cra_driver_name = "echainiv-authenc-"
7300 +                                                  "hmac-sha256-cbc-aes-"
7301 +                                                  "caam-qi2",
7302 +                               .cra_blocksize = AES_BLOCK_SIZE,
7303 +                       },
7304 +                       .setkey = aead_setkey,
7305 +                       .setauthsize = aead_setauthsize,
7306 +                       .encrypt = aead_encrypt,
7307 +                       .decrypt = aead_decrypt,
7308 +                       .ivsize = AES_BLOCK_SIZE,
7309 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7310 +               },
7311 +               .caam = {
7312 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7313 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7314 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7315 +                       .geniv = true,
7316 +               }
7317 +       },
7318 +       {
7319 +               .aead = {
7320 +                       .base = {
7321 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
7322 +                               .cra_driver_name = "authenc-hmac-sha384-"
7323 +                                                  "cbc-aes-caam-qi2",
7324 +                               .cra_blocksize = AES_BLOCK_SIZE,
7325 +                       },
7326 +                       .setkey = aead_setkey,
7327 +                       .setauthsize = aead_setauthsize,
7328 +                       .encrypt = aead_encrypt,
7329 +                       .decrypt = aead_decrypt,
7330 +                       .ivsize = AES_BLOCK_SIZE,
7331 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7332 +               },
7333 +               .caam = {
7334 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7335 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7336 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7337 +               }
7338 +       },
7339 +       {
7340 +               .aead = {
7341 +                       .base = {
7342 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
7343 +                                           "cbc(aes)))",
7344 +                               .cra_driver_name = "echainiv-authenc-"
7345 +                                                  "hmac-sha384-cbc-aes-"
7346 +                                                  "caam-qi2",
7347 +                               .cra_blocksize = AES_BLOCK_SIZE,
7348 +                       },
7349 +                       .setkey = aead_setkey,
7350 +                       .setauthsize = aead_setauthsize,
7351 +                       .encrypt = aead_encrypt,
7352 +                       .decrypt = aead_decrypt,
7353 +                       .ivsize = AES_BLOCK_SIZE,
7354 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7355 +               },
7356 +               .caam = {
7357 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7358 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7359 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7360 +                       .geniv = true,
7361 +               }
7362 +       },
7363 +       {
7364 +               .aead = {
7365 +                       .base = {
7366 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
7367 +                               .cra_driver_name = "authenc-hmac-sha512-"
7368 +                                                  "cbc-aes-caam-qi2",
7369 +                               .cra_blocksize = AES_BLOCK_SIZE,
7370 +                       },
7371 +                       .setkey = aead_setkey,
7372 +                       .setauthsize = aead_setauthsize,
7373 +                       .encrypt = aead_encrypt,
7374 +                       .decrypt = aead_decrypt,
7375 +                       .ivsize = AES_BLOCK_SIZE,
7376 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7377 +               },
7378 +               .caam = {
7379 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7380 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7381 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7382 +               }
7383 +       },
7384 +       {
7385 +               .aead = {
7386 +                       .base = {
7387 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
7388 +                                           "cbc(aes)))",
7389 +                               .cra_driver_name = "echainiv-authenc-"
7390 +                                                  "hmac-sha512-cbc-aes-"
7391 +                                                  "caam-qi2",
7392 +                               .cra_blocksize = AES_BLOCK_SIZE,
7393 +                       },
7394 +                       .setkey = aead_setkey,
7395 +                       .setauthsize = aead_setauthsize,
7396 +                       .encrypt = aead_encrypt,
7397 +                       .decrypt = aead_decrypt,
7398 +                       .ivsize = AES_BLOCK_SIZE,
7399 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7400 +               },
7401 +               .caam = {
7402 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7403 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7404 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7405 +                       .geniv = true,
7406 +               }
7407 +       },
7408 +       {
7409 +               .aead = {
7410 +                       .base = {
7411 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
7412 +                               .cra_driver_name = "authenc-hmac-md5-"
7413 +                                                  "cbc-des3_ede-caam-qi2",
7414 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7415 +                       },
7416 +                       .setkey = aead_setkey,
7417 +                       .setauthsize = aead_setauthsize,
7418 +                       .encrypt = aead_encrypt,
7419 +                       .decrypt = aead_decrypt,
7420 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7421 +                       .maxauthsize = MD5_DIGEST_SIZE,
7422 +               },
7423 +               .caam = {
7424 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7425 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7426 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7427 +               }
7428 +       },
7429 +       {
7430 +               .aead = {
7431 +                       .base = {
7432 +                               .cra_name = "echainiv(authenc(hmac(md5),"
7433 +                                           "cbc(des3_ede)))",
7434 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
7435 +                                                  "cbc-des3_ede-caam-qi2",
7436 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7437 +                       },
7438 +                       .setkey = aead_setkey,
7439 +                       .setauthsize = aead_setauthsize,
7440 +                       .encrypt = aead_encrypt,
7441 +                       .decrypt = aead_decrypt,
7442 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7443 +                       .maxauthsize = MD5_DIGEST_SIZE,
7444 +               },
7445 +               .caam = {
7446 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7447 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7448 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7449 +                       .geniv = true,
7450 +               }
7451 +       },
7452 +       {
7453 +               .aead = {
7454 +                       .base = {
7455 +                               .cra_name = "authenc(hmac(sha1),"
7456 +                                           "cbc(des3_ede))",
7457 +                               .cra_driver_name = "authenc-hmac-sha1-"
7458 +                                                  "cbc-des3_ede-caam-qi2",
7459 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7460 +                       },
7461 +                       .setkey = aead_setkey,
7462 +                       .setauthsize = aead_setauthsize,
7463 +                       .encrypt = aead_encrypt,
7464 +                       .decrypt = aead_decrypt,
7465 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7466 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7467 +               },
7468 +               .caam = {
7469 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7470 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7471 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7472 +               },
7473 +       },
7474 +       {
7475 +               .aead = {
7476 +                       .base = {
7477 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
7478 +                                           "cbc(des3_ede)))",
7479 +                               .cra_driver_name = "echainiv-authenc-"
7480 +                                                  "hmac-sha1-"
7481 +                                                  "cbc-des3_ede-caam-qi2",
7482 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7483 +                       },
7484 +                       .setkey = aead_setkey,
7485 +                       .setauthsize = aead_setauthsize,
7486 +                       .encrypt = aead_encrypt,
7487 +                       .decrypt = aead_decrypt,
7488 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7489 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7490 +               },
7491 +               .caam = {
7492 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7493 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7494 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7495 +                       .geniv = true,
7496 +               }
7497 +       },
7498 +       {
7499 +               .aead = {
7500 +                       .base = {
7501 +                               .cra_name = "authenc(hmac(sha224),"
7502 +                                           "cbc(des3_ede))",
7503 +                               .cra_driver_name = "authenc-hmac-sha224-"
7504 +                                                  "cbc-des3_ede-caam-qi2",
7505 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7506 +                       },
7507 +                       .setkey = aead_setkey,
7508 +                       .setauthsize = aead_setauthsize,
7509 +                       .encrypt = aead_encrypt,
7510 +                       .decrypt = aead_decrypt,
7511 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7512 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7513 +               },
7514 +               .caam = {
7515 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7516 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7517 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7518 +               },
7519 +       },
7520 +       {
7521 +               .aead = {
7522 +                       .base = {
7523 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
7524 +                                           "cbc(des3_ede)))",
7525 +                               .cra_driver_name = "echainiv-authenc-"
7526 +                                                  "hmac-sha224-"
7527 +                                                  "cbc-des3_ede-caam-qi2",
7528 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7529 +                       },
7530 +                       .setkey = aead_setkey,
7531 +                       .setauthsize = aead_setauthsize,
7532 +                       .encrypt = aead_encrypt,
7533 +                       .decrypt = aead_decrypt,
7534 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7535 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7536 +               },
7537 +               .caam = {
7538 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7539 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7540 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7541 +                       .geniv = true,
7542 +               }
7543 +       },
7544 +       {
7545 +               .aead = {
7546 +                       .base = {
7547 +                               .cra_name = "authenc(hmac(sha256),"
7548 +                                           "cbc(des3_ede))",
7549 +                               .cra_driver_name = "authenc-hmac-sha256-"
7550 +                                                  "cbc-des3_ede-caam-qi2",
7551 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7552 +                       },
7553 +                       .setkey = aead_setkey,
7554 +                       .setauthsize = aead_setauthsize,
7555 +                       .encrypt = aead_encrypt,
7556 +                       .decrypt = aead_decrypt,
7557 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7558 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7559 +               },
7560 +               .caam = {
7561 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7562 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7563 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7564 +               },
7565 +       },
7566 +       {
7567 +               .aead = {
7568 +                       .base = {
7569 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
7570 +                                           "cbc(des3_ede)))",
7571 +                               .cra_driver_name = "echainiv-authenc-"
7572 +                                                  "hmac-sha256-"
7573 +                                                  "cbc-des3_ede-caam-qi2",
7574 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7575 +                       },
7576 +                       .setkey = aead_setkey,
7577 +                       .setauthsize = aead_setauthsize,
7578 +                       .encrypt = aead_encrypt,
7579 +                       .decrypt = aead_decrypt,
7580 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7581 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7582 +               },
7583 +               .caam = {
7584 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7585 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7586 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7587 +                       .geniv = true,
7588 +               }
7589 +       },
7590 +       {
7591 +               .aead = {
7592 +                       .base = {
7593 +                               .cra_name = "authenc(hmac(sha384),"
7594 +                                           "cbc(des3_ede))",
7595 +                               .cra_driver_name = "authenc-hmac-sha384-"
7596 +                                                  "cbc-des3_ede-caam-qi2",
7597 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7598 +                       },
7599 +                       .setkey = aead_setkey,
7600 +                       .setauthsize = aead_setauthsize,
7601 +                       .encrypt = aead_encrypt,
7602 +                       .decrypt = aead_decrypt,
7603 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7604 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7605 +               },
7606 +               .caam = {
7607 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7608 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7609 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7610 +               },
7611 +       },
7612 +       {
7613 +               .aead = {
7614 +                       .base = {
7615 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
7616 +                                           "cbc(des3_ede)))",
7617 +                               .cra_driver_name = "echainiv-authenc-"
7618 +                                                  "hmac-sha384-"
7619 +                                                  "cbc-des3_ede-caam-qi2",
7620 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7621 +                       },
7622 +                       .setkey = aead_setkey,
7623 +                       .setauthsize = aead_setauthsize,
7624 +                       .encrypt = aead_encrypt,
7625 +                       .decrypt = aead_decrypt,
7626 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7627 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7628 +               },
7629 +               .caam = {
7630 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7631 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7632 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7633 +                       .geniv = true,
7634 +               }
7635 +       },
7636 +       {
7637 +               .aead = {
7638 +                       .base = {
7639 +                               .cra_name = "authenc(hmac(sha512),"
7640 +                                           "cbc(des3_ede))",
7641 +                               .cra_driver_name = "authenc-hmac-sha512-"
7642 +                                                  "cbc-des3_ede-caam-qi2",
7643 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7644 +                       },
7645 +                       .setkey = aead_setkey,
7646 +                       .setauthsize = aead_setauthsize,
7647 +                       .encrypt = aead_encrypt,
7648 +                       .decrypt = aead_decrypt,
7649 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7650 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7651 +               },
7652 +               .caam = {
7653 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7654 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7655 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7656 +               },
7657 +       },
7658 +       {
7659 +               .aead = {
7660 +                       .base = {
7661 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
7662 +                                           "cbc(des3_ede)))",
7663 +                               .cra_driver_name = "echainiv-authenc-"
7664 +                                                  "hmac-sha512-"
7665 +                                                  "cbc-des3_ede-caam-qi2",
7666 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7667 +                       },
7668 +                       .setkey = aead_setkey,
7669 +                       .setauthsize = aead_setauthsize,
7670 +                       .encrypt = aead_encrypt,
7671 +                       .decrypt = aead_decrypt,
7672 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
7673 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7674 +               },
7675 +               .caam = {
7676 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7677 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7678 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7679 +                       .geniv = true,
7680 +               }
7681 +       },
7682 +       {
7683 +               .aead = {
7684 +                       .base = {
7685 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
7686 +                               .cra_driver_name = "authenc-hmac-md5-"
7687 +                                                  "cbc-des-caam-qi2",
7688 +                               .cra_blocksize = DES_BLOCK_SIZE,
7689 +                       },
7690 +                       .setkey = aead_setkey,
7691 +                       .setauthsize = aead_setauthsize,
7692 +                       .encrypt = aead_encrypt,
7693 +                       .decrypt = aead_decrypt,
7694 +                       .ivsize = DES_BLOCK_SIZE,
7695 +                       .maxauthsize = MD5_DIGEST_SIZE,
7696 +               },
7697 +               .caam = {
7698 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7699 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7700 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7701 +               },
7702 +       },
7703 +       {
7704 +               .aead = {
7705 +                       .base = {
7706 +                               .cra_name = "echainiv(authenc(hmac(md5),"
7707 +                                           "cbc(des)))",
7708 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
7709 +                                                  "cbc-des-caam-qi2",
7710 +                               .cra_blocksize = DES_BLOCK_SIZE,
7711 +                       },
7712 +                       .setkey = aead_setkey,
7713 +                       .setauthsize = aead_setauthsize,
7714 +                       .encrypt = aead_encrypt,
7715 +                       .decrypt = aead_decrypt,
7716 +                       .ivsize = DES_BLOCK_SIZE,
7717 +                       .maxauthsize = MD5_DIGEST_SIZE,
7718 +               },
7719 +               .caam = {
7720 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7721 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7722 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7723 +                       .geniv = true,
7724 +               }
7725 +       },
7726 +       {
7727 +               .aead = {
7728 +                       .base = {
7729 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
7730 +                               .cra_driver_name = "authenc-hmac-sha1-"
7731 +                                                  "cbc-des-caam-qi2",
7732 +                               .cra_blocksize = DES_BLOCK_SIZE,
7733 +                       },
7734 +                       .setkey = aead_setkey,
7735 +                       .setauthsize = aead_setauthsize,
7736 +                       .encrypt = aead_encrypt,
7737 +                       .decrypt = aead_decrypt,
7738 +                       .ivsize = DES_BLOCK_SIZE,
7739 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7740 +               },
7741 +               .caam = {
7742 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7743 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7744 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7745 +               },
7746 +       },
7747 +       {
7748 +               .aead = {
7749 +                       .base = {
7750 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
7751 +                                           "cbc(des)))",
7752 +                               .cra_driver_name = "echainiv-authenc-"
7753 +                                                  "hmac-sha1-cbc-des-caam-qi2",
7754 +                               .cra_blocksize = DES_BLOCK_SIZE,
7755 +                       },
7756 +                       .setkey = aead_setkey,
7757 +                       .setauthsize = aead_setauthsize,
7758 +                       .encrypt = aead_encrypt,
7759 +                       .decrypt = aead_decrypt,
7760 +                       .ivsize = DES_BLOCK_SIZE,
7761 +                       .maxauthsize = SHA1_DIGEST_SIZE,
7762 +               },
7763 +               .caam = {
7764 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7765 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7766 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7767 +                       .geniv = true,
7768 +               }
7769 +       },
7770 +       {
7771 +               .aead = {
7772 +                       .base = {
7773 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
7774 +                               .cra_driver_name = "authenc-hmac-sha224-"
7775 +                                                  "cbc-des-caam-qi2",
7776 +                               .cra_blocksize = DES_BLOCK_SIZE,
7777 +                       },
7778 +                       .setkey = aead_setkey,
7779 +                       .setauthsize = aead_setauthsize,
7780 +                       .encrypt = aead_encrypt,
7781 +                       .decrypt = aead_decrypt,
7782 +                       .ivsize = DES_BLOCK_SIZE,
7783 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7784 +               },
7785 +               .caam = {
7786 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7787 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7788 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7789 +               },
7790 +       },
7791 +       {
7792 +               .aead = {
7793 +                       .base = {
7794 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
7795 +                                           "cbc(des)))",
7796 +                               .cra_driver_name = "echainiv-authenc-"
7797 +                                                  "hmac-sha224-cbc-des-"
7798 +                                                  "caam-qi2",
7799 +                               .cra_blocksize = DES_BLOCK_SIZE,
7800 +                       },
7801 +                       .setkey = aead_setkey,
7802 +                       .setauthsize = aead_setauthsize,
7803 +                       .encrypt = aead_encrypt,
7804 +                       .decrypt = aead_decrypt,
7805 +                       .ivsize = DES_BLOCK_SIZE,
7806 +                       .maxauthsize = SHA224_DIGEST_SIZE,
7807 +               },
7808 +               .caam = {
7809 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7810 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7811 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7812 +                       .geniv = true,
7813 +               }
7814 +       },
7815 +       {
7816 +               .aead = {
7817 +                       .base = {
7818 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
7819 +                               .cra_driver_name = "authenc-hmac-sha256-"
7820 +                                                  "cbc-des-caam-qi2",
7821 +                               .cra_blocksize = DES_BLOCK_SIZE,
7822 +                       },
7823 +                       .setkey = aead_setkey,
7824 +                       .setauthsize = aead_setauthsize,
7825 +                       .encrypt = aead_encrypt,
7826 +                       .decrypt = aead_decrypt,
7827 +                       .ivsize = DES_BLOCK_SIZE,
7828 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7829 +               },
7830 +               .caam = {
7831 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7832 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7833 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7834 +               },
7835 +       },
7836 +       {
7837 +               .aead = {
7838 +                       .base = {
7839 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
7840 +                                           "cbc(des)))",
7841 +                               .cra_driver_name = "echainiv-authenc-"
7842 +                                                  "hmac-sha256-cbc-des-"
7843 +                                                  "caam-qi2",
7844 +                               .cra_blocksize = DES_BLOCK_SIZE,
7845 +                       },
7846 +                       .setkey = aead_setkey,
7847 +                       .setauthsize = aead_setauthsize,
7848 +                       .encrypt = aead_encrypt,
7849 +                       .decrypt = aead_decrypt,
7850 +                       .ivsize = DES_BLOCK_SIZE,
7851 +                       .maxauthsize = SHA256_DIGEST_SIZE,
7852 +               },
7853 +               .caam = {
7854 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7855 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7856 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7857 +                       .geniv = true,
7858 +               },
7859 +       },
7860 +       {
7861 +               .aead = {
7862 +                       .base = {
7863 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
7864 +                               .cra_driver_name = "authenc-hmac-sha384-"
7865 +                                                  "cbc-des-caam-qi2",
7866 +                               .cra_blocksize = DES_BLOCK_SIZE,
7867 +                       },
7868 +                       .setkey = aead_setkey,
7869 +                       .setauthsize = aead_setauthsize,
7870 +                       .encrypt = aead_encrypt,
7871 +                       .decrypt = aead_decrypt,
7872 +                       .ivsize = DES_BLOCK_SIZE,
7873 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7874 +               },
7875 +               .caam = {
7876 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7877 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7878 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7879 +               },
7880 +       },
7881 +       {
7882 +               .aead = {
7883 +                       .base = {
7884 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
7885 +                                           "cbc(des)))",
7886 +                               .cra_driver_name = "echainiv-authenc-"
7887 +                                                  "hmac-sha384-cbc-des-"
7888 +                                                  "caam-qi2",
7889 +                               .cra_blocksize = DES_BLOCK_SIZE,
7890 +                       },
7891 +                       .setkey = aead_setkey,
7892 +                       .setauthsize = aead_setauthsize,
7893 +                       .encrypt = aead_encrypt,
7894 +                       .decrypt = aead_decrypt,
7895 +                       .ivsize = DES_BLOCK_SIZE,
7896 +                       .maxauthsize = SHA384_DIGEST_SIZE,
7897 +               },
7898 +               .caam = {
7899 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7900 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7901 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7902 +                       .geniv = true,
7903 +               }
7904 +       },
7905 +       {
7906 +               .aead = {
7907 +                       .base = {
7908 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
7909 +                               .cra_driver_name = "authenc-hmac-sha512-"
7910 +                                                  "cbc-des-caam-qi2",
7911 +                               .cra_blocksize = DES_BLOCK_SIZE,
7912 +                       },
7913 +                       .setkey = aead_setkey,
7914 +                       .setauthsize = aead_setauthsize,
7915 +                       .encrypt = aead_encrypt,
7916 +                       .decrypt = aead_decrypt,
7917 +                       .ivsize = DES_BLOCK_SIZE,
7918 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7919 +               },
7920 +               .caam = {
7921 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7922 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7923 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7924 +               }
7925 +       },
7926 +       {
7927 +               .aead = {
7928 +                       .base = {
7929 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
7930 +                                           "cbc(des)))",
7931 +                               .cra_driver_name = "echainiv-authenc-"
7932 +                                                  "hmac-sha512-cbc-des-"
7933 +                                                  "caam-qi2",
7934 +                               .cra_blocksize = DES_BLOCK_SIZE,
7935 +                       },
7936 +                       .setkey = aead_setkey,
7937 +                       .setauthsize = aead_setauthsize,
7938 +                       .encrypt = aead_encrypt,
7939 +                       .decrypt = aead_decrypt,
7940 +                       .ivsize = DES_BLOCK_SIZE,
7941 +                       .maxauthsize = SHA512_DIGEST_SIZE,
7942 +               },
7943 +               .caam = {
7944 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7945 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7946 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7947 +                       .geniv = true,
7948 +               }
7949 +       },
7950 +       {
7951 +               .aead = {
7952 +                       .base = {
7953 +                               .cra_name = "authenc(hmac(md5),"
7954 +                                           "rfc3686(ctr(aes)))",
7955 +                               .cra_driver_name = "authenc-hmac-md5-"
7956 +                                                  "rfc3686-ctr-aes-caam-qi2",
7957 +                               .cra_blocksize = 1,
7958 +                       },
7959 +                       .setkey = aead_setkey,
7960 +                       .setauthsize = aead_setauthsize,
7961 +                       .encrypt = aead_encrypt,
7962 +                       .decrypt = aead_decrypt,
7963 +                       .ivsize = CTR_RFC3686_IV_SIZE,
7964 +                       .maxauthsize = MD5_DIGEST_SIZE,
7965 +               },
7966 +               .caam = {
7967 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
7968 +                                          OP_ALG_AAI_CTR_MOD128,
7969 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7970 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7971 +                       .rfc3686 = true,
7972 +               },
7973 +       },
7974 +       {
7975 +               .aead = {
7976 +                       .base = {
7977 +                               .cra_name = "seqiv(authenc("
7978 +                                           "hmac(md5),rfc3686(ctr(aes))))",
7979 +                               .cra_driver_name = "seqiv-authenc-hmac-md5-"
7980 +                                                  "rfc3686-ctr-aes-caam-qi2",
7981 +                               .cra_blocksize = 1,
7982 +                       },
7983 +                       .setkey = aead_setkey,
7984 +                       .setauthsize = aead_setauthsize,
7985 +                       .encrypt = aead_encrypt,
7986 +                       .decrypt = aead_decrypt,
7987 +                       .ivsize = CTR_RFC3686_IV_SIZE,
7988 +                       .maxauthsize = MD5_DIGEST_SIZE,
7989 +               },
7990 +               .caam = {
7991 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
7992 +                                          OP_ALG_AAI_CTR_MOD128,
7993 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7994 +                                          OP_ALG_AAI_HMAC_PRECOMP,
7995 +                       .rfc3686 = true,
7996 +                       .geniv = true,
7997 +               },
7998 +       },
7999 +       {
8000 +               .aead = {
8001 +                       .base = {
8002 +                               .cra_name = "authenc(hmac(sha1),"
8003 +                                           "rfc3686(ctr(aes)))",
8004 +                               .cra_driver_name = "authenc-hmac-sha1-"
8005 +                                                  "rfc3686-ctr-aes-caam-qi2",
8006 +                               .cra_blocksize = 1,
8007 +                       },
8008 +                       .setkey = aead_setkey,
8009 +                       .setauthsize = aead_setauthsize,
8010 +                       .encrypt = aead_encrypt,
8011 +                       .decrypt = aead_decrypt,
8012 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8013 +                       .maxauthsize = SHA1_DIGEST_SIZE,
8014 +               },
8015 +               .caam = {
8016 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8017 +                                          OP_ALG_AAI_CTR_MOD128,
8018 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8019 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8020 +                       .rfc3686 = true,
8021 +               },
8022 +       },
8023 +       {
8024 +               .aead = {
8025 +                       .base = {
8026 +                               .cra_name = "seqiv(authenc("
8027 +                                           "hmac(sha1),rfc3686(ctr(aes))))",
8028 +                               .cra_driver_name = "seqiv-authenc-hmac-sha1-"
8029 +                                                  "rfc3686-ctr-aes-caam-qi2",
8030 +                               .cra_blocksize = 1,
8031 +                       },
8032 +                       .setkey = aead_setkey,
8033 +                       .setauthsize = aead_setauthsize,
8034 +                       .encrypt = aead_encrypt,
8035 +                       .decrypt = aead_decrypt,
8036 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8037 +                       .maxauthsize = SHA1_DIGEST_SIZE,
8038 +               },
8039 +               .caam = {
8040 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8041 +                                          OP_ALG_AAI_CTR_MOD128,
8042 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8043 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8044 +                       .rfc3686 = true,
8045 +                       .geniv = true,
8046 +               },
8047 +       },
8048 +       {
8049 +               .aead = {
8050 +                       .base = {
8051 +                               .cra_name = "authenc(hmac(sha224),"
8052 +                                           "rfc3686(ctr(aes)))",
8053 +                               .cra_driver_name = "authenc-hmac-sha224-"
8054 +                                                  "rfc3686-ctr-aes-caam-qi2",
8055 +                               .cra_blocksize = 1,
8056 +                       },
8057 +                       .setkey = aead_setkey,
8058 +                       .setauthsize = aead_setauthsize,
8059 +                       .encrypt = aead_encrypt,
8060 +                       .decrypt = aead_decrypt,
8061 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8062 +                       .maxauthsize = SHA224_DIGEST_SIZE,
8063 +               },
8064 +               .caam = {
8065 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8066 +                                          OP_ALG_AAI_CTR_MOD128,
8067 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8068 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8069 +                       .rfc3686 = true,
8070 +               },
8071 +       },
8072 +       {
8073 +               .aead = {
8074 +                       .base = {
8075 +                               .cra_name = "seqiv(authenc("
8076 +                                           "hmac(sha224),rfc3686(ctr(aes))))",
8077 +                               .cra_driver_name = "seqiv-authenc-hmac-sha224-"
8078 +                                                  "rfc3686-ctr-aes-caam-qi2",
8079 +                               .cra_blocksize = 1,
8080 +                       },
8081 +                       .setkey = aead_setkey,
8082 +                       .setauthsize = aead_setauthsize,
8083 +                       .encrypt = aead_encrypt,
8084 +                       .decrypt = aead_decrypt,
8085 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8086 +                       .maxauthsize = SHA224_DIGEST_SIZE,
8087 +               },
8088 +               .caam = {
8089 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8090 +                                          OP_ALG_AAI_CTR_MOD128,
8091 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8092 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8093 +                       .rfc3686 = true,
8094 +                       .geniv = true,
8095 +               },
8096 +       },
8097 +       {
8098 +               .aead = {
8099 +                       .base = {
8100 +                               .cra_name = "authenc(hmac(sha256),"
8101 +                                           "rfc3686(ctr(aes)))",
8102 +                               .cra_driver_name = "authenc-hmac-sha256-"
8103 +                                                  "rfc3686-ctr-aes-caam-qi2",
8104 +                               .cra_blocksize = 1,
8105 +                       },
8106 +                       .setkey = aead_setkey,
8107 +                       .setauthsize = aead_setauthsize,
8108 +                       .encrypt = aead_encrypt,
8109 +                       .decrypt = aead_decrypt,
8110 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8111 +                       .maxauthsize = SHA256_DIGEST_SIZE,
8112 +               },
8113 +               .caam = {
8114 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8115 +                                          OP_ALG_AAI_CTR_MOD128,
8116 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8117 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8118 +                       .rfc3686 = true,
8119 +               },
8120 +       },
8121 +       {
8122 +               .aead = {
8123 +                       .base = {
8124 +                               .cra_name = "seqiv(authenc(hmac(sha256),"
8125 +                                           "rfc3686(ctr(aes))))",
8126 +                               .cra_driver_name = "seqiv-authenc-hmac-sha256-"
8127 +                                                  "rfc3686-ctr-aes-caam-qi2",
8128 +                               .cra_blocksize = 1,
8129 +                       },
8130 +                       .setkey = aead_setkey,
8131 +                       .setauthsize = aead_setauthsize,
8132 +                       .encrypt = aead_encrypt,
8133 +                       .decrypt = aead_decrypt,
8134 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8135 +                       .maxauthsize = SHA256_DIGEST_SIZE,
8136 +               },
8137 +               .caam = {
8138 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8139 +                                          OP_ALG_AAI_CTR_MOD128,
8140 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8141 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8142 +                       .rfc3686 = true,
8143 +                       .geniv = true,
8144 +               },
8145 +       },
8146 +       {
8147 +               .aead = {
8148 +                       .base = {
8149 +                               .cra_name = "authenc(hmac(sha384),"
8150 +                                           "rfc3686(ctr(aes)))",
8151 +                               .cra_driver_name = "authenc-hmac-sha384-"
8152 +                                                  "rfc3686-ctr-aes-caam-qi2",
8153 +                               .cra_blocksize = 1,
8154 +                       },
8155 +                       .setkey = aead_setkey,
8156 +                       .setauthsize = aead_setauthsize,
8157 +                       .encrypt = aead_encrypt,
8158 +                       .decrypt = aead_decrypt,
8159 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8160 +                       .maxauthsize = SHA384_DIGEST_SIZE,
8161 +               },
8162 +               .caam = {
8163 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8164 +                                          OP_ALG_AAI_CTR_MOD128,
8165 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8166 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8167 +                       .rfc3686 = true,
8168 +               },
8169 +       },
8170 +       {
8171 +               .aead = {
8172 +                       .base = {
8173 +                               .cra_name = "seqiv(authenc(hmac(sha384),"
8174 +                                           "rfc3686(ctr(aes))))",
8175 +                               .cra_driver_name = "seqiv-authenc-hmac-sha384-"
8176 +                                                  "rfc3686-ctr-aes-caam-qi2",
8177 +                               .cra_blocksize = 1,
8178 +                       },
8179 +                       .setkey = aead_setkey,
8180 +                       .setauthsize = aead_setauthsize,
8181 +                       .encrypt = aead_encrypt,
8182 +                       .decrypt = aead_decrypt,
8183 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8184 +                       .maxauthsize = SHA384_DIGEST_SIZE,
8185 +               },
8186 +               .caam = {
8187 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8188 +                                          OP_ALG_AAI_CTR_MOD128,
8189 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8190 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8191 +                       .rfc3686 = true,
8192 +                       .geniv = true,
8193 +               },
8194 +       },
8195 +       {
8196 +               .aead = {
8197 +                       .base = {
8198 +                               .cra_name = "rfc7539(chacha20,poly1305)",
8199 +                               .cra_driver_name = "rfc7539-chacha20-poly1305-"
8200 +                                                  "caam-qi2",
8201 +                               .cra_blocksize = 1,
8202 +                       },
8203 +                       .setkey = chachapoly_setkey,
8204 +                       .setauthsize = chachapoly_setauthsize,
8205 +                       .encrypt = aead_encrypt,
8206 +                       .decrypt = aead_decrypt,
8207 +                       .ivsize = CHACHAPOLY_IV_SIZE,
8208 +                       .maxauthsize = POLY1305_DIGEST_SIZE,
8209 +               },
8210 +               .caam = {
8211 +                       .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8212 +                                          OP_ALG_AAI_AEAD,
8213 +                       .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8214 +                                          OP_ALG_AAI_AEAD,
8215 +               },
8216 +       },
8217 +       {
8218 +               .aead = {
8219 +                       .base = {
8220 +                               .cra_name = "rfc7539esp(chacha20,poly1305)",
8221 +                               .cra_driver_name = "rfc7539esp-chacha20-"
8222 +                                                  "poly1305-caam-qi2",
8223 +                               .cra_blocksize = 1,
8224 +                       },
8225 +                       .setkey = chachapoly_setkey,
8226 +                       .setauthsize = chachapoly_setauthsize,
8227 +                       .encrypt = aead_encrypt,
8228 +                       .decrypt = aead_decrypt,
8229 +                       .ivsize = 8,
8230 +                       .maxauthsize = POLY1305_DIGEST_SIZE,
8231 +               },
8232 +               .caam = {
8233 +                       .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8234 +                                          OP_ALG_AAI_AEAD,
8235 +                       .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8236 +                                          OP_ALG_AAI_AEAD,
8237 +               },
8238 +       },
8239 +       {
8240 +               .aead = {
8241 +                       .base = {
8242 +                               .cra_name = "authenc(hmac(sha512),"
8243 +                                           "rfc3686(ctr(aes)))",
8244 +                               .cra_driver_name = "authenc-hmac-sha512-"
8245 +                                                  "rfc3686-ctr-aes-caam-qi2",
8246 +                               .cra_blocksize = 1,
8247 +                       },
8248 +                       .setkey = aead_setkey,
8249 +                       .setauthsize = aead_setauthsize,
8250 +                       .encrypt = aead_encrypt,
8251 +                       .decrypt = aead_decrypt,
8252 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8253 +                       .maxauthsize = SHA512_DIGEST_SIZE,
8254 +               },
8255 +               .caam = {
8256 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8257 +                                          OP_ALG_AAI_CTR_MOD128,
8258 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8259 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8260 +                       .rfc3686 = true,
8261 +               },
8262 +       },
8263 +       {
8264 +               .aead = {
8265 +                       .base = {
8266 +                               .cra_name = "seqiv(authenc(hmac(sha512),"
8267 +                                           "rfc3686(ctr(aes))))",
8268 +                               .cra_driver_name = "seqiv-authenc-hmac-sha512-"
8269 +                                                  "rfc3686-ctr-aes-caam-qi2",
8270 +                               .cra_blocksize = 1,
8271 +                       },
8272 +                       .setkey = aead_setkey,
8273 +                       .setauthsize = aead_setauthsize,
8274 +                       .encrypt = aead_encrypt,
8275 +                       .decrypt = aead_decrypt,
8276 +                       .ivsize = CTR_RFC3686_IV_SIZE,
8277 +                       .maxauthsize = SHA512_DIGEST_SIZE,
8278 +               },
8279 +               .caam = {
8280 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
8281 +                                          OP_ALG_AAI_CTR_MOD128,
8282 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8283 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8284 +                       .rfc3686 = true,
8285 +                       .geniv = true,
8286 +               },
8287 +       },
8288 +       {
8289 +               .aead = {
8290 +                       .base = {
8291 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
8292 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
8293 +                               .cra_blocksize = AES_BLOCK_SIZE,
8294 +                       },
8295 +                       .setkey = tls_setkey,
8296 +                       .setauthsize = tls_setauthsize,
8297 +                       .encrypt = tls_encrypt,
8298 +                       .decrypt = tls_decrypt,
8299 +                       .ivsize = AES_BLOCK_SIZE,
8300 +                       .maxauthsize = SHA1_DIGEST_SIZE,
8301 +               },
8302 +               .caam = {
8303 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
8304 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8305 +                                          OP_ALG_AAI_HMAC_PRECOMP,
8306 +               },
8307 +       },
8308 +};
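+
+/*
+ * For reference, a minimal sketch of how this template table is consumed
+ * at probe time (assuming the array and the 'registered' flag are named
+ * as in the related caamalg_qi driver):
+ *
+ *	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ *		struct caam_aead_alg *t_alg = driver_aeads + i;
+ *
+ *		caam_aead_alg_init(t_alg);
+ *		if (!crypto_register_aead(&t_alg->aead))
+ *			t_alg->registered = true;
+ *	}
+ */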
8309 +
8310 +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
8311 +{
8312 +       struct skcipher_alg *alg = &t_alg->skcipher;
8313 +
8314 +       alg->base.cra_module = THIS_MODULE;
8315 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
8316 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8317 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8318 +
8319 +       alg->init = caam_cra_init_skcipher;
8320 +       alg->exit = caam_cra_exit;
8321 +}
8322 +
8323 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
8324 +{
8325 +       struct aead_alg *alg = &t_alg->aead;
8326 +
8327 +       alg->base.cra_module = THIS_MODULE;
8328 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
8329 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8330 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8331 +
8332 +       alg->init = caam_cra_init_aead;
8333 +       alg->exit = caam_cra_exit_aead;
8334 +}
8335 +
8336 +/* max hash key is max split key size */
8337 +#define CAAM_MAX_HASH_KEY_SIZE         (SHA512_DIGEST_SIZE * 2)
8338 +
8339 +#define CAAM_MAX_HASH_BLOCK_SIZE       SHA512_BLOCK_SIZE
8340 +#define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
8341 +
8342 +#define DESC_HASH_MAX_USED_BYTES       (DESC_AHASH_FINAL_LEN + \
8343 +                                        CAAM_MAX_HASH_KEY_SIZE)
8344 +#define DESC_HASH_MAX_USED_LEN         (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
8345 +
8346 +/* caam context sizes for hashes: running digest + 8 */
8347 +#define HASH_MSG_LEN                   8
8348 +#define MAX_CTX_LEN                    (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
8349 +
8350 +enum hash_optype {
8351 +       UPDATE = 0,
8352 +       UPDATE_FIRST,
8353 +       FINALIZE,
8354 +       DIGEST,
8355 +       HASH_NUM_OP
8356 +};
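+
+/*
+ * Each hash_optype selects one of the four shared descriptors built in
+ * ahash_set_sh_desc() below: UPDATE for intermediate .update calls
+ * (running state imported and re-exported), UPDATE_FIRST for the first
+ * update with no incoming state, FINALIZE for .final/.finup, and DIGEST
+ * for one-shot .digest requests.
+ */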
8357 +
8358 +/**
8359 + * caam_hash_ctx - ahash per-session context
8360 + * @flc: Flow Contexts array
8361 + * @flc_dma: I/O virtual addresses of the Flow Contexts
8362 + * @key:  virtual address of the authentication key
8363 + * @dev: dpseci device
8364 + * @ctx_len: size of Context Register
8365 + * @adata: hashing algorithm details
8366 + */
8367 +struct caam_hash_ctx {
8368 +       struct caam_flc flc[HASH_NUM_OP];
8369 +       dma_addr_t flc_dma[HASH_NUM_OP];
8370 +       u8 key[CAAM_MAX_HASH_KEY_SIZE];
8371 +       struct device *dev;
8372 +       int ctx_len;
8373 +       struct alginfo adata;
8374 +};
8375 +
8376 +/* ahash state */
8377 +struct caam_hash_state {
8378 +       struct caam_request caam_req;
8379 +       dma_addr_t buf_dma;
8380 +       dma_addr_t ctx_dma;
8381 +       u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8382 +       int buflen_0;
8383 +       u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8384 +       int buflen_1;
8385 +       u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
8386 +       int (*update)(struct ahash_request *req);
8387 +       int (*final)(struct ahash_request *req);
8388 +       int (*finup)(struct ahash_request *req);
8389 +       int current_buf;
8390 +};
8391 +
8392 +struct caam_export_state {
8393 +       u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
8394 +       u8 caam_ctx[MAX_CTX_LEN];
8395 +       int buflen;
8396 +       int (*update)(struct ahash_request *req);
8397 +       int (*final)(struct ahash_request *req);
8398 +       int (*finup)(struct ahash_request *req);
8399 +};
8400 +
8401 +static inline void switch_buf(struct caam_hash_state *state)
8402 +{
8403 +       state->current_buf ^= 1;
8404 +}
8405 +
8406 +static inline u8 *current_buf(struct caam_hash_state *state)
8407 +{
8408 +       return state->current_buf ? state->buf_1 : state->buf_0;
8409 +}
8410 +
8411 +static inline u8 *alt_buf(struct caam_hash_state *state)
8412 +{
8413 +       return state->current_buf ? state->buf_0 : state->buf_1;
8414 +}
8415 +
8416 +static inline int *current_buflen(struct caam_hash_state *state)
8417 +{
8418 +       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
8419 +}
8420 +
8421 +static inline int *alt_buflen(struct caam_hash_state *state)
8422 +{
8423 +       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
8424 +}
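+
+/*
+ * buf_0/buf_1 form a ping-pong pair: the "current" buffer feeds the job
+ * being built, leftover input for the next job is staged in the
+ * alternate buffer, and the completion path flips the roles via
+ * switch_buf().  A minimal sketch of a partial-update path (rem and
+ * src_len are illustrative names, not taken from this patch):
+ *
+ *	scatterwalk_map_and_copy(alt_buf(state), req->src,
+ *				 src_len - rem, rem, 0);
+ *	*alt_buflen(state) = rem;
+ */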
8425 +
8426 +/* Map current buffer in state (if length > 0) and put it in link table */
8427 +static inline int buf_map_to_qm_sg(struct device *dev,
8428 +                                  struct dpaa2_sg_entry *qm_sg,
8429 +                                  struct caam_hash_state *state)
8430 +{
8431 +       int buflen = *current_buflen(state);
8432 +
8433 +       if (!buflen)
8434 +               return 0;
8435 +
8436 +       state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
8437 +                                       DMA_TO_DEVICE);
8438 +       if (dma_mapping_error(dev, state->buf_dma)) {
8439 +               dev_err(dev, "unable to map buf\n");
8440 +               state->buf_dma = 0;
8441 +               return -ENOMEM;
8442 +       }
8443 +
8444 +       dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
8445 +
8446 +       return 0;
8447 +}
8448 +
8449 +/* Map state->caam_ctx, and add it to link table */
8450 +static inline int ctx_map_to_qm_sg(struct device *dev,
8451 +                                  struct caam_hash_state *state, int ctx_len,
8452 +                                  struct dpaa2_sg_entry *qm_sg, u32 flag)
8453 +{
8454 +       state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
8455 +       if (dma_mapping_error(dev, state->ctx_dma)) {
8456 +               dev_err(dev, "unable to map ctx\n");
8457 +               state->ctx_dma = 0;
8458 +               return -ENOMEM;
8459 +       }
8460 +
8461 +       dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
8462 +
8463 +       return 0;
8464 +}
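+
+/*
+ * The two helpers above each fill one DPAA2 QMan scatter-gather entry
+ * (via dma_to_qm_sg_one()), so a job's input frame can chain the saved
+ * context register and the buffered partial block ahead of the
+ * scatterlist data.
+ */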
8465 +
8466 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
8467 +{
8468 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8469 +       int digestsize = crypto_ahash_digestsize(ahash);
8470 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
8471 +       struct caam_flc *flc;
8472 +       u32 *desc;
8473 +
8474 +       ctx->adata.key_virt = ctx->key;
8475 +       ctx->adata.key_inline = true;
8476 +
8477 +       /* ahash_update shared descriptor */
8478 +       flc = &ctx->flc[UPDATE];
8479 +       desc = flc->sh_desc;
8480 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
8481 +                         ctx->ctx_len, true, priv->sec_attr.era);
8482 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8483 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
8484 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
8485 +#ifdef DEBUG
8486 +       print_hex_dump(KERN_ERR,
8487 +                      "ahash update shdesc@" __stringify(__LINE__)": ",
8488 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8489 +#endif
8490 +
8491 +       /* ahash_update_first shared descriptor */
8492 +       flc = &ctx->flc[UPDATE_FIRST];
8493 +       desc = flc->sh_desc;
8494 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
8495 +                         ctx->ctx_len, false, priv->sec_attr.era);
8496 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8497 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
8498 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
8499 +#ifdef DEBUG
8500 +       print_hex_dump(KERN_ERR,
8501 +                      "ahash update first shdesc@" __stringify(__LINE__)": ",
8502 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8503 +#endif
8504 +
8505 +       /* ahash_final shared descriptor */
8506 +       flc = &ctx->flc[FINALIZE];
8507 +       desc = flc->sh_desc;
8508 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
8509 +                         ctx->ctx_len, true, priv->sec_attr.era);
8510 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8511 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
8512 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
8513 +#ifdef DEBUG
8514 +       print_hex_dump(KERN_ERR,
8515 +                      "ahash final shdesc@" __stringify(__LINE__)": ",
8516 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8517 +#endif
8518 +
8519 +       /* ahash_digest shared descriptor */
8520 +       flc = &ctx->flc[DIGEST];
8521 +       desc = flc->sh_desc;
8522 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
8523 +                         ctx->ctx_len, false, priv->sec_attr.era);
8524 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8525 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
8526 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
8527 +#ifdef DEBUG
8528 +       print_hex_dump(KERN_ERR,
8529 +                      "ahash digest shdesc@" __stringify(__LINE__)": ",
8530 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8531 +#endif
8532 +
8533 +       return 0;
8534 +}
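+
+/*
+ * The flow contexts rewritten above are presumably kept DMA-mapped for
+ * the lifetime of the tfm (flc_dma[] in caam_hash_ctx), so publishing a
+ * new descriptor only requires dma_sync_single_for_device(); flc->flc[1]
+ * carries the shared-descriptor length (SDL) word that DPSECI reads
+ * first.
+ */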
8535 +
8536 +/* Digest the key down to digest size if it is longer than the block size */
8537 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
8538 +                          u32 *keylen, u8 *key_out, u32 digestsize)
8539 +{
8540 +       struct caam_request *req_ctx;
8541 +       u32 *desc;
8542 +       struct split_key_sh_result result;
8543 +       dma_addr_t src_dma, dst_dma;
8544 +       struct caam_flc *flc;
8545 +       dma_addr_t flc_dma;
8546 +       int ret = -ENOMEM;
8547 +       struct dpaa2_fl_entry *in_fle, *out_fle;
8548 +
8549 +       req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
8550 +       if (!req_ctx)
8551 +               return -ENOMEM;
8552 +
8553 +       in_fle = &req_ctx->fd_flt[1];
8554 +       out_fle = &req_ctx->fd_flt[0];
8555 +
8556 +       flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
8557 +       if (!flc)
8558 +               goto err_flc;
8559 +
8560 +       src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
8561 +                                DMA_TO_DEVICE);
8562 +       if (dma_mapping_error(ctx->dev, src_dma)) {
8563 +               dev_err(ctx->dev, "unable to map key input memory\n");
8564 +               goto err_src_dma;
8565 +       }
8566 +       dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
8567 +                                DMA_FROM_DEVICE);
8568 +       if (dma_mapping_error(ctx->dev, dst_dma)) {
8569 +               dev_err(ctx->dev, "unable to map key output memory\n");
8570 +               goto err_dst_dma;
8571 +       }
8572 +
8573 +       desc = flc->sh_desc;
8574 +
8575 +       init_sh_desc(desc, 0);
8576 +
8577 +       /* descriptor to perform unkeyed hash on key_in */
8578 +       append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
8579 +                        OP_ALG_AS_INITFINAL);
8580 +       append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
8581 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
8582 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
8583 +                        LDST_SRCDST_BYTE_CONTEXT);
8584 +
8585 +       flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8586 +       flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
8587 +                                desc_bytes(desc), DMA_TO_DEVICE);
8588 +       if (dma_mapping_error(ctx->dev, flc_dma)) {
8589 +               dev_err(ctx->dev, "unable to map shared descriptor\n");
8590 +               goto err_flc_dma;
8591 +       }
8592 +
8593 +       dpaa2_fl_set_final(in_fle, true);
8594 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8595 +       dpaa2_fl_set_addr(in_fle, src_dma);
8596 +       dpaa2_fl_set_len(in_fle, *keylen);
8597 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8598 +       dpaa2_fl_set_addr(out_fle, dst_dma);
8599 +       dpaa2_fl_set_len(out_fle, digestsize);
8600 +
8601 +#ifdef DEBUG
8602 +       print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
8603 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
8604 +       print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
8605 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8606 +#endif
8607 +
8608 +       result.err = 0;
8609 +       init_completion(&result.completion);
8610 +       result.dev = ctx->dev;
8611 +
8612 +       req_ctx->flc = flc;
8613 +       req_ctx->flc_dma = flc_dma;
8614 +       req_ctx->cbk = split_key_sh_done;
8615 +       req_ctx->ctx = &result;
8616 +
8617 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8618 +       if (ret == -EINPROGRESS) {
8619 +               /* in progress */
8620 +               wait_for_completion(&result.completion);
8621 +               ret = result.err;
8622 +#ifdef DEBUG
8623 +               print_hex_dump(KERN_ERR,
8624 +                              "digested key@" __stringify(__LINE__)": ",
8625 +                              DUMP_PREFIX_ADDRESS, 16, 4, key_out, digestsize,
8626 +                              1);
8627 +#endif
8628 +       }
8629 +
8630 +       dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
8631 +                        DMA_TO_DEVICE);
8632 +err_flc_dma:
8633 +       dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
8634 +err_dst_dma:
8635 +       dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
8636 +err_src_dma:
8637 +       kfree(flc);
8638 +err_flc:
8639 +       kfree(req_ctx);
8640 +
8641 +       *keylen = digestsize;
8642 +
8643 +       return ret;
8644 +}
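+
+/*
+ * hash_digest_key() runs a one-shot, unkeyed hash job: a throwaway flow
+ * context whose descriptor body (OPERATION, SEQ FIFO LOAD, SEQ STORE)
+ * digests key_in into key_out, with fd_flt[1]/fd_flt[0] serving as the
+ * input/output frame-list entries and split_key_sh_done() waking the
+ * synchronous waiter.
+ */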
8645 +
8646 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
8647 +                       unsigned int keylen)
8648 +{
8649 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8650 +       unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
8651 +       unsigned int digestsize = crypto_ahash_digestsize(ahash);
8652 +       int ret;
8653 +       u8 *hashed_key = NULL;
8654 +
8655 +#ifdef DEBUG
8656 +       dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
8657 +#endif
8658 +
8659 +       if (keylen > blocksize) {
8660 +               hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
8661 +                                          GFP_KERNEL | GFP_DMA);
8662 +               if (!hashed_key)
8663 +                       return -ENOMEM;
8664 +               ret = hash_digest_key(ctx, key, &keylen, hashed_key,
8665 +                                     digestsize);
8666 +               if (ret)
8667 +                       goto bad_free_key;
8668 +               key = hashed_key;
8669 +       }
8670 +
8671 +       ctx->adata.keylen = keylen;
8672 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8673 +                                             OP_ALG_ALGSEL_MASK);
8674 +       if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
8675 +               goto bad_free_key;
8676 +
8677 +       memcpy(ctx->key, key, keylen);
8678 +
8679 +       kfree(hashed_key);
8680 +       return ahash_set_sh_desc(ahash);
8681 +bad_free_key:
8682 +       kfree(hashed_key);
8683 +       crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
8684 +       return -EINVAL;
8685 +}
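+
+/*
+ * The pre-digest in ahash_setkey() follows the standard HMAC rule
+ * (RFC 2104): a key longer than the block size is first hashed down to
+ * digest size.  For example, a 100-byte key given to hmac(sha256)
+ * (64-byte block) is replaced by its 32-byte SHA-256 digest before
+ * ctx->adata.keylen is set.
+ */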
8686 +
8687 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
8688 +                              struct ahash_request *req, int dst_len)
8689 +{
8690 +       struct caam_hash_state *state = ahash_request_ctx(req);
8691 +
8692 +       if (edesc->src_nents)
8693 +               dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
8694 +       if (edesc->dst_dma)
8695 +               dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
8696 +
8697 +       if (edesc->qm_sg_bytes)
8698 +               dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
8699 +                                DMA_TO_DEVICE);
8700 +
8701 +       if (state->buf_dma) {
8702 +               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
8703 +                                DMA_TO_DEVICE);
8704 +               state->buf_dma = 0;
8705 +       }
8706 +}
8707 +
8708 +static inline void ahash_unmap_ctx(struct device *dev,
8709 +                                  struct ahash_edesc *edesc,
8710 +                                  struct ahash_request *req, int dst_len,
8711 +                                  u32 flag)
8712 +{
8713 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8714 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8715 +       struct caam_hash_state *state = ahash_request_ctx(req);
8716 +
8717 +       if (state->ctx_dma) {
8718 +               dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
8719 +               state->ctx_dma = 0;
8720 +       }
8721 +       ahash_unmap(dev, edesc, req, dst_len);
8722 +}
8723 +
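+/*
+ * Completion callbacks, one per job flavour. They differ only in how
+ * the running context was mapped: not at all (ahash_done), as both
+ * input and output (ahash_done_bi), as input only (ahash_done_ctx_src)
+ * or as output only (ahash_done_ctx_dst).
+ */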
8724 +static void ahash_done(void *cbk_ctx, u32 status)
8725 +{
8726 +       struct crypto_async_request *areq = cbk_ctx;
8727 +       struct ahash_request *req = ahash_request_cast(areq);
8728 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8729 +       struct caam_hash_state *state = ahash_request_ctx(req);
8730 +       struct ahash_edesc *edesc = state->caam_req.edesc;
8731 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8732 +       int digestsize = crypto_ahash_digestsize(ahash);
8733 +       int ecode = 0;
8734 +
8735 +#ifdef DEBUG
8736 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8737 +#endif
8738 +
8739 +       if (unlikely(status)) {
8740 +               caam_qi2_strstatus(ctx->dev, status);
8741 +               ecode = -EIO;
8742 +       }
8743 +
8744 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
8745 +       qi_cache_free(edesc);
8746 +
8747 +#ifdef DEBUG
8748 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8749 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8750 +                      ctx->ctx_len, 1);
8751 +       if (req->result)
8752 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8753 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8754 +                              digestsize, 1);
8755 +#endif
8756 +
8757 +       req->base.complete(&req->base, ecode);
8758 +}
8759 +
8760 +static void ahash_done_bi(void *cbk_ctx, u32 status)
8761 +{
8762 +       struct crypto_async_request *areq = cbk_ctx;
8763 +       struct ahash_request *req = ahash_request_cast(areq);
8764 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8765 +       struct caam_hash_state *state = ahash_request_ctx(req);
8766 +       struct ahash_edesc *edesc = state->caam_req.edesc;
8767 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8768 +       int ecode = 0;
8769 +#ifdef DEBUG
8770 +       int digestsize = crypto_ahash_digestsize(ahash);
8771 +
8772 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8773 +#endif
8774 +
8775 +       if (unlikely(status)) {
8776 +               caam_qi2_strstatus(ctx->dev, status);
8777 +               ecode = -EIO;
8778 +       }
8779 +
8780 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8781 +       switch_buf(state);
8782 +       qi_cache_free(edesc);
8783 +
8784 +#ifdef DEBUG
8785 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8786 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8787 +                      ctx->ctx_len, 1);
8788 +       if (req->result)
8789 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8790 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8791 +                              digestsize, 1);
8792 +#endif
8793 +
8794 +       req->base.complete(&req->base, ecode);
8795 +}
8796 +
8797 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
8798 +{
8799 +       struct crypto_async_request *areq = cbk_ctx;
8800 +       struct ahash_request *req = ahash_request_cast(areq);
8801 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8802 +       struct caam_hash_state *state = ahash_request_ctx(req);
8803 +       struct ahash_edesc *edesc = state->caam_req.edesc;
8804 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8805 +       int digestsize = crypto_ahash_digestsize(ahash);
8806 +       int ecode = 0;
8807 +
8808 +#ifdef DEBUG
8809 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8810 +#endif
8811 +
8812 +       if (unlikely(status)) {
8813 +               caam_qi2_strstatus(ctx->dev, status);
8814 +               ecode = -EIO;
8815 +       }
8816 +
8817 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
8818 +       qi_cache_free(edesc);
8819 +
8820 +#ifdef DEBUG
8821 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8822 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8823 +                      ctx->ctx_len, 1);
8824 +       if (req->result)
8825 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8826 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8827 +                              digestsize, 1);
8828 +#endif
8829 +
8830 +       req->base.complete(&req->base, ecode);
8831 +}
8832 +
8833 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
8834 +{
8835 +       struct crypto_async_request *areq = cbk_ctx;
8836 +       struct ahash_request *req = ahash_request_cast(areq);
8837 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8838 +       struct caam_hash_state *state = ahash_request_ctx(req);
8839 +       struct ahash_edesc *edesc = state->caam_req.edesc;
8840 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8841 +       int ecode = 0;
8842 +#ifdef DEBUG
8843 +       int digestsize = crypto_ahash_digestsize(ahash);
8844 +
8845 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8846 +#endif
8847 +
8848 +       if (unlikely(status)) {
8849 +               caam_qi2_strstatus(ctx->dev, status);
8850 +               ecode = -EIO;
8851 +       }
8852 +
8853 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
8854 +       switch_buf(state);
8855 +       qi_cache_free(edesc);
8856 +
8857 +#ifdef DEBUG
8858 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8859 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8860 +                      ctx->ctx_len, 1);
8861 +       if (req->result)
8862 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8863 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8864 +                              digestsize, 1);
8865 +#endif
8866 +
8867 +       req->base.complete(&req->base, ecode);
8868 +}
8869 +
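+/*
+ * Update with a live running context: only whole blocks (to_hash) are
+ * fed to the engine; the sub-blocksize remainder is copied into the
+ * alternate buffer and prepended on the next call.
+ */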
8870 +static int ahash_update_ctx(struct ahash_request *req)
8871 +{
8872 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8873 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8874 +       struct caam_hash_state *state = ahash_request_ctx(req);
8875 +       struct caam_request *req_ctx = &state->caam_req;
8876 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8877 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8878 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8879 +                     GFP_KERNEL : GFP_ATOMIC;
8880 +       u8 *buf = current_buf(state);
8881 +       int *buflen = current_buflen(state);
8882 +       u8 *next_buf = alt_buf(state);
8883 +       int *next_buflen = alt_buflen(state), last_buflen;
8884 +       int in_len = *buflen + req->nbytes, to_hash;
8885 +       int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
8886 +       struct ahash_edesc *edesc;
8887 +       int ret = 0;
8888 +
8889 +       last_buflen = *next_buflen;
8890 +       *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
8891 +       to_hash = in_len - *next_buflen;
8892 +
8893 +       if (to_hash) {
8894 +               struct dpaa2_sg_entry *sg_table;
8895 +
8896 +               src_nents = sg_nents_for_len(req->src,
8897 +                                            req->nbytes - *next_buflen);
8898 +               if (src_nents < 0) {
8899 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
8900 +                       return src_nents;
8901 +               }
8902 +
8903 +               if (src_nents) {
8904 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8905 +                                                 DMA_TO_DEVICE);
8906 +                       if (!mapped_nents) {
8907 +                               dev_err(ctx->dev, "unable to DMA map source\n");
8908 +                               return -ENOMEM;
8909 +                       }
8910 +               } else {
8911 +                       mapped_nents = 0;
8912 +               }
8913 +
8914 +               /* allocate space for base edesc and link tables */
8915 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
8916 +               if (!edesc) {
8917 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
8918 +                                    DMA_TO_DEVICE);
8919 +                       return -ENOMEM;
8920 +               }
8921 +
8922 +               edesc->src_nents = src_nents;
8923 +               qm_sg_src_index = 1 + (*buflen ? 1 : 0);
8924 +               qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
8925 +                             sizeof(*sg_table);
8926 +               sg_table = &edesc->sgt[0];
8927 +
8928 +               ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8929 +                                      DMA_BIDIRECTIONAL);
8930 +               if (ret)
8931 +                       goto unmap_ctx;
8932 +
8933 +               ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8934 +               if (ret)
8935 +                       goto unmap_ctx;
8936 +
8937 +               if (mapped_nents) {
8938 +                       sg_to_qm_sg_last(req->src, mapped_nents,
8939 +                                        sg_table + qm_sg_src_index, 0);
8940 +                       if (*next_buflen)
8941 +                               scatterwalk_map_and_copy(next_buf, req->src,
8942 +                                                        to_hash - *buflen,
8943 +                                                        *next_buflen, 0);
8944 +               } else {
8945 +                       dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
8946 +                                          true);
8947 +               }
8948 +
8949 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8950 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
8951 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8952 +                       dev_err(ctx->dev, "unable to map S/G table\n");
8953 +                       ret = -ENOMEM;
8954 +                       goto unmap_ctx;
8955 +               }
8956 +               edesc->qm_sg_bytes = qm_sg_bytes;
8957 +
8958 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8959 +               dpaa2_fl_set_final(in_fle, true);
8960 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8961 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8962 +               dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
8963 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8964 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8965 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8966 +
8967 +               req_ctx->flc = &ctx->flc[UPDATE];
8968 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE];
8969 +               req_ctx->cbk = ahash_done_bi;
8970 +               req_ctx->ctx = &req->base;
8971 +               req_ctx->edesc = edesc;
8972 +
8973 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8974 +               if (ret != -EINPROGRESS &&
8975 +                   !(ret == -EBUSY &&
8976 +                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8977 +                       goto unmap_ctx;
8978 +       } else if (*next_buflen) {
8979 +               scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
8980 +                                        req->nbytes, 0);
8981 +               *buflen = *next_buflen;
8982 +               *next_buflen = last_buflen;
8983 +       }
8984 +#ifdef DEBUG
8985 +       print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
8986 +                      DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
8987 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8988 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
8989 +                      *next_buflen, 1);
8990 +#endif
8991 +
8992 +       return ret;
8993 +unmap_ctx:
8994 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8995 +       qi_cache_free(edesc);
8996 +       return ret;
8997 +}
8998 +
8999 +static int ahash_final_ctx(struct ahash_request *req)
9000 +{
9001 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9002 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9003 +       struct caam_hash_state *state = ahash_request_ctx(req);
9004 +       struct caam_request *req_ctx = &state->caam_req;
9005 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9006 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9007 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9008 +                     GFP_KERNEL : GFP_ATOMIC;
9009 +       int buflen = *current_buflen(state);
9010 +       int qm_sg_bytes, qm_sg_src_index;
9011 +       int digestsize = crypto_ahash_digestsize(ahash);
9012 +       struct ahash_edesc *edesc;
9013 +       struct dpaa2_sg_entry *sg_table;
9014 +       int ret;
9015 +
9016 +       /* allocate space for base edesc and link tables */
9017 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
9018 +       if (!edesc)
9019 +               return -ENOMEM;
9020 +
9021 +       qm_sg_src_index = 1 + (buflen ? 1 : 0);
9022 +       qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
9023 +       sg_table = &edesc->sgt[0];
9024 +
9025 +       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9026 +                              DMA_TO_DEVICE);
9027 +       if (ret)
9028 +               goto unmap_ctx;
9029 +
9030 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9031 +       if (ret)
9032 +               goto unmap_ctx;
9033 +
9034 +       dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
9035 +
9036 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9037 +                                         DMA_TO_DEVICE);
9038 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9039 +               dev_err(ctx->dev, "unable to map S/G table\n");
9040 +               ret = -ENOMEM;
9041 +               goto unmap_ctx;
9042 +       }
9043 +       edesc->qm_sg_bytes = qm_sg_bytes;
9044 +
9045 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9046 +                                       DMA_FROM_DEVICE);
9047 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9048 +               dev_err(ctx->dev, "unable to map dst\n");
9049 +               edesc->dst_dma = 0;
9050 +               ret = -ENOMEM;
9051 +               goto unmap_ctx;
9052 +       }
9053 +
9054 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9055 +       dpaa2_fl_set_final(in_fle, true);
9056 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9057 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9058 +       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
9059 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9060 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9061 +       dpaa2_fl_set_len(out_fle, digestsize);
9062 +
9063 +       req_ctx->flc = &ctx->flc[FINALIZE];
9064 +       req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9065 +       req_ctx->cbk = ahash_done_ctx_src;
9066 +       req_ctx->ctx = &req->base;
9067 +       req_ctx->edesc = edesc;
9068 +
9069 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9070 +       if (ret == -EINPROGRESS ||
9071 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9072 +               return ret;
9073 +
9074 +unmap_ctx:
9075 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
9076 +       qi_cache_free(edesc);
9077 +       return ret;
9078 +}
9079 +
9080 +static int ahash_finup_ctx(struct ahash_request *req)
9081 +{
9082 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9083 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9084 +       struct caam_hash_state *state = ahash_request_ctx(req);
9085 +       struct caam_request *req_ctx = &state->caam_req;
9086 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9087 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9088 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9089 +                     GFP_KERNEL : GFP_ATOMIC;
9090 +       int buflen = *current_buflen(state);
9091 +       int qm_sg_bytes, qm_sg_src_index;
9092 +       int src_nents, mapped_nents;
9093 +       int digestsize = crypto_ahash_digestsize(ahash);
9094 +       struct ahash_edesc *edesc;
9095 +       struct dpaa2_sg_entry *sg_table;
9096 +       int ret;
9097 +
9098 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9099 +       if (src_nents < 0) {
9100 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
9101 +               return src_nents;
9102 +       }
9103 +
9104 +       if (src_nents) {
9105 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9106 +                                         DMA_TO_DEVICE);
9107 +               if (!mapped_nents) {
9108 +                       dev_err(ctx->dev, "unable to DMA map source\n");
9109 +                       return -ENOMEM;
9110 +               }
9111 +       } else {
9112 +               mapped_nents = 0;
9113 +       }
9114 +
9115 +       /* allocate space for base edesc and link tables */
9116 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
9117 +       if (!edesc) {
9118 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9119 +               return -ENOMEM;
9120 +       }
9121 +
9122 +       edesc->src_nents = src_nents;
9123 +       qm_sg_src_index = 1 + (buflen ? 1 : 0);
9124 +       qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
9125 +       sg_table = &edesc->sgt[0];
9126 +
9127 +       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9128 +                              DMA_TO_DEVICE);
9129 +       if (ret)
9130 +               goto unmap_ctx;
9131 +
9132 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9133 +       if (ret)
9134 +               goto unmap_ctx;
9135 +
9136 +       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
9137 +
9138 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9139 +                                         DMA_TO_DEVICE);
9140 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9141 +               dev_err(ctx->dev, "unable to map S/G table\n");
9142 +               ret = -ENOMEM;
9143 +               goto unmap_ctx;
9144 +       }
9145 +       edesc->qm_sg_bytes = qm_sg_bytes;
9146 +
9147 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9148 +                                       DMA_FROM_DEVICE);
9149 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9150 +               dev_err(ctx->dev, "unable to map dst\n");
9151 +               edesc->dst_dma = 0;
9152 +               ret = -ENOMEM;
9153 +               goto unmap_ctx;
9154 +       }
9155 +
9156 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9157 +       dpaa2_fl_set_final(in_fle, true);
9158 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9159 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9160 +       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
9161 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9162 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9163 +       dpaa2_fl_set_len(out_fle, digestsize);
9164 +
9165 +       req_ctx->flc = &ctx->flc[FINALIZE];
9166 +       req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9167 +       req_ctx->cbk = ahash_done_ctx_src;
9168 +       req_ctx->ctx = &req->base;
9169 +       req_ctx->edesc = edesc;
9170 +
9171 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9172 +       if (ret == -EINPROGRESS ||
9173 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9174 +               return ret;
9175 +
9176 +unmap_ctx:
9177 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
9178 +       qi_cache_free(edesc);
9179 +       return ret;
9180 +}
9181 +
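+/* One-shot digest of req->src, no running context involved. */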
9182 +static int ahash_digest(struct ahash_request *req)
9183 +{
9184 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9185 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9186 +       struct caam_hash_state *state = ahash_request_ctx(req);
9187 +       struct caam_request *req_ctx = &state->caam_req;
9188 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9189 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9190 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9191 +                     GFP_KERNEL : GFP_ATOMIC;
9192 +       int digestsize = crypto_ahash_digestsize(ahash);
9193 +       int src_nents, mapped_nents;
9194 +       struct ahash_edesc *edesc;
9195 +       int ret = -ENOMEM;
9196 +
9197 +       state->buf_dma = 0;
9198 +
9199 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9200 +       if (src_nents < 0) {
9201 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
9202 +               return src_nents;
9203 +       }
9204 +
9205 +       if (src_nents) {
9206 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9207 +                                         DMA_TO_DEVICE);
9208 +               if (!mapped_nents) {
9209 +                       dev_err(ctx->dev, "unable to map source for DMA\n");
9210 +                       return ret;
9211 +               }
9212 +       } else {
9213 +               mapped_nents = 0;
9214 +       }
9215 +
9216 +       /* allocate space for base edesc and link tables */
9217 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
9218 +       if (!edesc) {
9219 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9220 +               return ret;
9221 +       }
9222 +
9223 +       edesc->src_nents = src_nents;
9224 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9225 +
9226 +       if (mapped_nents > 1) {
9227 +               int qm_sg_bytes;
9228 +               struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
9229 +
9230 +               qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9231 +               sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9232 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9233 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
9234 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9235 +                       dev_err(ctx->dev, "unable to map S/G table\n");
9236 +                       goto unmap;
9237 +               }
9238 +               edesc->qm_sg_bytes = qm_sg_bytes;
9239 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9240 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9241 +       } else {
9242 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9243 +               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9244 +       }
9245 +
9246 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9247 +                                       DMA_FROM_DEVICE);
9248 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9249 +               dev_err(ctx->dev, "unable to map dst\n");
9250 +               edesc->dst_dma = 0;
9251 +               goto unmap;
9252 +       }
9253 +
9254 +       dpaa2_fl_set_final(in_fle, true);
9255 +       dpaa2_fl_set_len(in_fle, req->nbytes);
9256 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9257 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9258 +       dpaa2_fl_set_len(out_fle, digestsize);
9259 +
9260 +       req_ctx->flc = &ctx->flc[DIGEST];
9261 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9262 +       req_ctx->cbk = ahash_done;
9263 +       req_ctx->ctx = &req->base;
9264 +       req_ctx->edesc = edesc;
9265 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9266 +       if (ret == -EINPROGRESS ||
9267 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9268 +               return ret;
9269 +
9270 +unmap:
9271 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
9272 +       qi_cache_free(edesc);
9273 +       return ret;
9274 +}
9275 +
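+/* Finalize when everything so far fit in the buffer: digest it directly. */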
9276 +static int ahash_final_no_ctx(struct ahash_request *req)
9277 +{
9278 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9279 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9280 +       struct caam_hash_state *state = ahash_request_ctx(req);
9281 +       struct caam_request *req_ctx = &state->caam_req;
9282 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9283 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9284 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9285 +                     GFP_KERNEL : GFP_ATOMIC;
9286 +       u8 *buf = current_buf(state);
9287 +       int buflen = *current_buflen(state);
9288 +       int digestsize = crypto_ahash_digestsize(ahash);
9289 +       struct ahash_edesc *edesc;
9290 +       int ret = -ENOMEM;
9291 +
9292 +       /* allocate space for base edesc and link tables */
9293 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
9294 +       if (!edesc)
9295 +               return ret;
9296 +
9297 +       state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
9298 +       if (dma_mapping_error(ctx->dev, state->buf_dma)) {
9299 +               dev_err(ctx->dev, "unable to map src\n");
9300 +               goto unmap;
9301 +       }
9302 +
9303 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9304 +                                       DMA_FROM_DEVICE);
9305 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9306 +               dev_err(ctx->dev, "unable to map dst\n");
9307 +               edesc->dst_dma = 0;
9308 +               goto unmap;
9309 +       }
9310 +
9311 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9312 +       dpaa2_fl_set_final(in_fle, true);
9313 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9314 +       dpaa2_fl_set_addr(in_fle, state->buf_dma);
9315 +       dpaa2_fl_set_len(in_fle, buflen);
9316 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9317 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9318 +       dpaa2_fl_set_len(out_fle, digestsize);
9319 +
9320 +       req_ctx->flc = &ctx->flc[DIGEST];
9321 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9322 +       req_ctx->cbk = ahash_done;
9323 +       req_ctx->ctx = &req->base;
9324 +       req_ctx->edesc = edesc;
9325 +
9326 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9327 +       if (ret == -EINPROGRESS ||
9328 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9329 +               return ret;
9330 +
9331 +unmap:
9332 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
9333 +       qi_cache_free(edesc);
9334 +       return ret;
9335 +}
9336 +
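+/*
+ * First real engine pass for a request that has so far only been
+ * buffered: run UPDATE_FIRST over buffer + new data, then hand the
+ * state over to the context-carrying handlers.
+ */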
9337 +static int ahash_update_no_ctx(struct ahash_request *req)
9338 +{
9339 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9340 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9341 +       struct caam_hash_state *state = ahash_request_ctx(req);
9342 +       struct caam_request *req_ctx = &state->caam_req;
9343 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9344 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9345 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9346 +                     GFP_KERNEL : GFP_ATOMIC;
9347 +       u8 *buf = current_buf(state);
9348 +       int *buflen = current_buflen(state);
9349 +       u8 *next_buf = alt_buf(state);
9350 +       int *next_buflen = alt_buflen(state);
9351 +       int in_len = *buflen + req->nbytes, to_hash;
9352 +       int qm_sg_bytes, src_nents, mapped_nents;
9353 +       struct ahash_edesc *edesc;
9354 +       int ret = 0;
9355 +
9356 +       *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
9357 +       to_hash = in_len - *next_buflen;
9358 +
9359 +       if (to_hash) {
9360 +               struct dpaa2_sg_entry *sg_table;
9361 +
9362 +               src_nents = sg_nents_for_len(req->src,
9363 +                                            req->nbytes - *next_buflen);
9364 +               if (src_nents < 0) {
9365 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
9366 +                       return src_nents;
9367 +               }
9368 +
9369 +               if (src_nents) {
9370 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9371 +                                                 DMA_TO_DEVICE);
9372 +                       if (!mapped_nents) {
9373 +                               dev_err(ctx->dev, "unable to DMA map source\n");
9374 +                               return -ENOMEM;
9375 +                       }
9376 +               } else {
9377 +                       mapped_nents = 0;
9378 +               }
9379 +
9380 +               /* allocate space for base edesc and link tables */
9381 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
9382 +               if (!edesc) {
9383 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
9384 +                                    DMA_TO_DEVICE);
9385 +                       return -ENOMEM;
9386 +               }
9387 +
9388 +               edesc->src_nents = src_nents;
9389 +               qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
9390 +               sg_table = &edesc->sgt[0];
9391 +
9392 +               ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9393 +               if (ret)
9394 +                       goto unmap_ctx;
9395 +
9396 +               sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9397 +
9398 +               if (*next_buflen)
9399 +                       scatterwalk_map_and_copy(next_buf, req->src,
9400 +                                                to_hash - *buflen,
9401 +                                                *next_buflen, 0);
9402 +
9403 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9404 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
9405 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9406 +                       dev_err(ctx->dev, "unable to map S/G table\n");
9407 +                       ret = -ENOMEM;
9408 +                       goto unmap_ctx;
9409 +               }
9410 +               edesc->qm_sg_bytes = qm_sg_bytes;
9411 +
9412 +               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9413 +                                               ctx->ctx_len, DMA_FROM_DEVICE);
9414 +               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9415 +                       dev_err(ctx->dev, "unable to map ctx\n");
9416 +                       state->ctx_dma = 0;
9417 +                       ret = -ENOMEM;
9418 +                       goto unmap_ctx;
9419 +               }
9420 +
9421 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9422 +               dpaa2_fl_set_final(in_fle, true);
9423 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9424 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9425 +               dpaa2_fl_set_len(in_fle, to_hash);
9426 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9427 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9428 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9429 +
9430 +               req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9431 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9432 +               req_ctx->cbk = ahash_done_ctx_dst;
9433 +               req_ctx->ctx = &req->base;
9434 +               req_ctx->edesc = edesc;
9435 +
9436 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9437 +               if (ret != -EINPROGRESS &&
9438 +                   !(ret == -EBUSY &&
9439 +                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9440 +                       goto unmap_ctx;
9441 +
9442 +               state->update = ahash_update_ctx;
9443 +               state->finup = ahash_finup_ctx;
9444 +               state->final = ahash_final_ctx;
9445 +       } else if (*next_buflen) {
9446 +               scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
9447 +                                        req->nbytes, 0);
9448 +               *buflen = *next_buflen;
9449 +               *next_buflen = 0;
9450 +       }
9451 +#ifdef DEBUG
9452 +       print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
9453 +                      DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
9454 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9455 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
9456 +                      *next_buflen, 1);
9457 +#endif
9458 +
9459 +       return ret;
9460 +unmap_ctx:
9461 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
9462 +       qi_cache_free(edesc);
9463 +       return ret;
9464 +}
9465 +
9466 +static int ahash_finup_no_ctx(struct ahash_request *req)
9467 +{
9468 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9469 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9470 +       struct caam_hash_state *state = ahash_request_ctx(req);
9471 +       struct caam_request *req_ctx = &state->caam_req;
9472 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9473 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9474 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9475 +                     GFP_KERNEL : GFP_ATOMIC;
9476 +       int buflen = *current_buflen(state);
9477 +       int qm_sg_bytes, src_nents, mapped_nents;
9478 +       int digestsize = crypto_ahash_digestsize(ahash);
9479 +       struct ahash_edesc *edesc;
9480 +       struct dpaa2_sg_entry *sg_table;
9481 +       int ret;
9482 +
9483 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9484 +       if (src_nents < 0) {
9485 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
9486 +               return src_nents;
9487 +       }
9488 +
9489 +       if (src_nents) {
9490 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9491 +                                         DMA_TO_DEVICE);
9492 +               if (!mapped_nents) {
9493 +                       dev_err(ctx->dev, "unable to DMA map source\n");
9494 +                       return -ENOMEM;
9495 +               }
9496 +       } else {
9497 +               mapped_nents = 0;
9498 +       }
9499 +
9500 +       /* allocate space for base edesc and link tables */
9501 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
9502 +       if (!edesc) {
9503 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9504 +               return -ENOMEM;
9505 +       }
9506 +
9507 +       edesc->src_nents = src_nents;
9508 +       qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
9509 +       sg_table = &edesc->sgt[0];
9510 +
9511 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9512 +       if (ret)
9513 +               goto unmap;
9514 +
9515 +       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9516 +
9517 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9518 +                                         DMA_TO_DEVICE);
9519 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9520 +               dev_err(ctx->dev, "unable to map S/G table\n");
9521 +               ret = -ENOMEM;
9522 +               goto unmap;
9523 +       }
9524 +       edesc->qm_sg_bytes = qm_sg_bytes;
9525 +
9526 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9527 +                                       DMA_FROM_DEVICE);
9528 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9529 +               dev_err(ctx->dev, "unable to map dst\n");
9530 +               edesc->dst_dma = 0;
9531 +               ret = -ENOMEM;
9532 +               goto unmap;
9533 +       }
9534 +
9535 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9536 +       dpaa2_fl_set_final(in_fle, true);
9537 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9538 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9539 +       dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
9540 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9541 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9542 +       dpaa2_fl_set_len(out_fle, digestsize);
9543 +
9544 +       req_ctx->flc = &ctx->flc[DIGEST];
9545 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9546 +       req_ctx->cbk = ahash_done;
9547 +       req_ctx->ctx = &req->base;
9548 +       req_ctx->edesc = edesc;
9549 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9550 +       if (ret != -EINPROGRESS &&
9551 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9552 +               goto unmap;
9553 +
9554 +       return ret;
9555 +unmap:
9556 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
9557 +       qi_cache_free(edesc);
9558 +       return ret;
9559 +}
9560 +
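+/*
+ * Very first update of a request: seed the running context with an
+ * UPDATE_FIRST job and switch to the _ctx handlers. Input smaller
+ * than one block is only buffered and the no-context handlers are
+ * installed instead.
+ */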
9561 +static int ahash_update_first(struct ahash_request *req)
9562 +{
9563 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9564 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9565 +       struct caam_hash_state *state = ahash_request_ctx(req);
9566 +       struct caam_request *req_ctx = &state->caam_req;
9567 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9568 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9569 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9570 +                     GFP_KERNEL : GFP_ATOMIC;
9571 +       u8 *next_buf = alt_buf(state);
9572 +       int *next_buflen = alt_buflen(state);
9573 +       int to_hash;
9574 +       int src_nents, mapped_nents;
9575 +       struct ahash_edesc *edesc;
9576 +       int ret = 0;
9577 +
9578 +       *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
9579 +                                     1);
9580 +       to_hash = req->nbytes - *next_buflen;
9581 +
9582 +       if (to_hash) {
9583 +               struct dpaa2_sg_entry *sg_table;
9584 +
9585 +               src_nents = sg_nents_for_len(req->src,
9586 +                                            req->nbytes - *next_buflen);
9587 +               if (src_nents < 0) {
9588 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
9589 +                       return src_nents;
9590 +               }
9591 +
9592 +               if (src_nents) {
9593 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9594 +                                                 DMA_TO_DEVICE);
9595 +                       if (!mapped_nents) {
9596 +                               dev_err(ctx->dev, "unable to map source for DMA\n");
9597 +                               return -ENOMEM;
9598 +                       }
9599 +               } else {
9600 +                       mapped_nents = 0;
9601 +               }
9602 +
9603 +               /* allocate space for base edesc and link tables */
9604 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
9605 +               if (!edesc) {
9606 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
9607 +                                    DMA_TO_DEVICE);
9608 +                       return -ENOMEM;
9609 +               }
9610 +
9611 +               edesc->src_nents = src_nents;
9612 +               sg_table = &edesc->sgt[0];
9613 +
9614 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9615 +               dpaa2_fl_set_final(in_fle, true);
9616 +               dpaa2_fl_set_len(in_fle, to_hash);
9617 +
9618 +               if (mapped_nents > 1) {
9619 +                       int qm_sg_bytes;
9620 +
9621 +                       sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9622 +                       qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9623 +                       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9624 +                                                         qm_sg_bytes,
9625 +                                                         DMA_TO_DEVICE);
9626 +                       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9627 +                               dev_err(ctx->dev, "unable to map S/G table\n");
9628 +                               ret = -ENOMEM;
9629 +                               goto unmap_ctx;
9630 +                       }
9631 +                       edesc->qm_sg_bytes = qm_sg_bytes;
9632 +                       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9633 +                       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9634 +               } else {
9635 +                       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9636 +                       dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9637 +               }
9638 +
9639 +               if (*next_buflen)
9640 +                       scatterwalk_map_and_copy(next_buf, req->src, to_hash,
9641 +                                                *next_buflen, 0);
9642 +
9643 +               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9644 +                                               ctx->ctx_len, DMA_FROM_DEVICE);
9645 +               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9646 +                       dev_err(ctx->dev, "unable to map ctx\n");
9647 +                       state->ctx_dma = 0;
9648 +                       ret = -ENOMEM;
9649 +                       goto unmap_ctx;
9650 +               }
9651 +
9652 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9653 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9654 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9655 +
9656 +               req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9657 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9658 +               req_ctx->cbk = ahash_done_ctx_dst;
9659 +               req_ctx->ctx = &req->base;
9660 +               req_ctx->edesc = edesc;
9661 +
9662 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9663 +               if (ret != -EINPROGRESS &&
9664 +                   !(ret == -EBUSY && req->base.flags &
9665 +                     CRYPTO_TFM_REQ_MAY_BACKLOG))
9666 +                       goto unmap_ctx;
9667 +
9668 +               state->update = ahash_update_ctx;
9669 +               state->finup = ahash_finup_ctx;
9670 +               state->final = ahash_final_ctx;
9671 +       } else if (*next_buflen) {
9672 +               state->update = ahash_update_no_ctx;
9673 +               state->finup = ahash_finup_no_ctx;
9674 +               state->final = ahash_final_no_ctx;
9675 +               scatterwalk_map_and_copy(next_buf, req->src, 0,
9676 +                                        req->nbytes, 0);
9677 +               switch_buf(state);
9678 +       }
9679 +#ifdef DEBUG
9680 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9681 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
9682 +#endif
9683 +
9684 +       return ret;
9685 +unmap_ctx:
9686 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
9687 +       qi_cache_free(edesc);
9688 +       return ret;
9689 +}
9690 +
9691 +static int ahash_finup_first(struct ahash_request *req)
9692 +{
9693 +       return ahash_digest(req);
9694 +}
9695 +
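+/*
+ * Entry points: per-request state carries update/finup/final function
+ * pointers, so each call dispatches to whichever no-context or
+ * context-carrying variant the preceding steps selected.
+ */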
9696 +static int ahash_init(struct ahash_request *req)
9697 +{
9698 +       struct caam_hash_state *state = ahash_request_ctx(req);
9699 +
9700 +       state->update = ahash_update_first;
9701 +       state->finup = ahash_finup_first;
9702 +       state->final = ahash_final_no_ctx;
9703 +
9704 +       state->ctx_dma = 0;
9705 +       state->current_buf = 0;
9706 +       state->buf_dma = 0;
9707 +       state->buflen_0 = 0;
9708 +       state->buflen_1 = 0;
9709 +
9710 +       return 0;
9711 +}
9712 +
9713 +static int ahash_update(struct ahash_request *req)
9714 +{
9715 +       struct caam_hash_state *state = ahash_request_ctx(req);
9716 +
9717 +       return state->update(req);
9718 +}
9719 +
9720 +static int ahash_finup(struct ahash_request *req)
9721 +{
9722 +       struct caam_hash_state *state = ahash_request_ctx(req);
9723 +
9724 +       return state->finup(req);
9725 +}
9726 +
9727 +static int ahash_final(struct ahash_request *req)
9728 +{
9729 +       struct caam_hash_state *state = ahash_request_ctx(req);
9730 +
9731 +       return state->final(req);
9732 +}
9733 +
9734 +static int ahash_export(struct ahash_request *req, void *out)
9735 +{
9736 +       struct caam_hash_state *state = ahash_request_ctx(req);
9737 +       struct caam_export_state *export = out;
9738 +       int len;
9739 +       u8 *buf;
9740 +
9741 +       if (state->current_buf) {
9742 +               buf = state->buf_1;
9743 +               len = state->buflen_1;
9744 +       } else {
9745 +               buf = state->buf_0;
9746 +               len = state->buflen_0;
9747 +       }
9748 +
9749 +       memcpy(export->buf, buf, len);
9750 +       memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
9751 +       export->buflen = len;
9752 +       export->update = state->update;
9753 +       export->final = state->final;
9754 +       export->finup = state->finup;
9755 +
9756 +       return 0;
9757 +}
9758 +
9759 +static int ahash_import(struct ahash_request *req, const void *in)
9760 +{
9761 +       struct caam_hash_state *state = ahash_request_ctx(req);
9762 +       const struct caam_export_state *export = in;
9763 +
9764 +       memset(state, 0, sizeof(*state));
9765 +       memcpy(state->buf_0, export->buf, export->buflen);
9766 +       memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
9767 +       state->buflen_0 = export->buflen;
9768 +       state->update = export->update;
9769 +       state->final = export->final;
9770 +       state->finup = export->finup;
9771 +
9772 +       return 0;
9773 +}
9774 +
9775 +struct caam_hash_template {
9776 +       char name[CRYPTO_MAX_ALG_NAME];
9777 +       char driver_name[CRYPTO_MAX_ALG_NAME];
9778 +       char hmac_name[CRYPTO_MAX_ALG_NAME];
9779 +       char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
9780 +       unsigned int blocksize;
9781 +       struct ahash_alg template_ahash;
9782 +       u32 alg_type;
9783 +};
9784 +
9785 +/* ahash descriptors */
9786 +static struct caam_hash_template driver_hash[] = {
9787 +       {
9788 +               .name = "sha1",
9789 +               .driver_name = "sha1-caam-qi2",
9790 +               .hmac_name = "hmac(sha1)",
9791 +               .hmac_driver_name = "hmac-sha1-caam-qi2",
9792 +               .blocksize = SHA1_BLOCK_SIZE,
9793 +               .template_ahash = {
9794 +                       .init = ahash_init,
9795 +                       .update = ahash_update,
9796 +                       .final = ahash_final,
9797 +                       .finup = ahash_finup,
9798 +                       .digest = ahash_digest,
9799 +                       .export = ahash_export,
9800 +                       .import = ahash_import,
9801 +                       .setkey = ahash_setkey,
9802 +                       .halg = {
9803 +                               .digestsize = SHA1_DIGEST_SIZE,
9804 +                               .statesize = sizeof(struct caam_export_state),
9805 +                       },
9806 +               },
9807 +               .alg_type = OP_ALG_ALGSEL_SHA1,
9808 +       }, {
9809 +               .name = "sha224",
9810 +               .driver_name = "sha224-caam-qi2",
9811 +               .hmac_name = "hmac(sha224)",
9812 +               .hmac_driver_name = "hmac-sha224-caam-qi2",
9813 +               .blocksize = SHA224_BLOCK_SIZE,
9814 +               .template_ahash = {
9815 +                       .init = ahash_init,
9816 +                       .update = ahash_update,
9817 +                       .final = ahash_final,
9818 +                       .finup = ahash_finup,
9819 +                       .digest = ahash_digest,
9820 +                       .export = ahash_export,
9821 +                       .import = ahash_import,
9822 +                       .setkey = ahash_setkey,
9823 +                       .halg = {
9824 +                               .digestsize = SHA224_DIGEST_SIZE,
9825 +                               .statesize = sizeof(struct caam_export_state),
9826 +                       },
9827 +               },
9828 +               .alg_type = OP_ALG_ALGSEL_SHA224,
9829 +       }, {
9830 +               .name = "sha256",
9831 +               .driver_name = "sha256-caam-qi2",
9832 +               .hmac_name = "hmac(sha256)",
9833 +               .hmac_driver_name = "hmac-sha256-caam-qi2",
9834 +               .blocksize = SHA256_BLOCK_SIZE,
9835 +               .template_ahash = {
9836 +                       .init = ahash_init,
9837 +                       .update = ahash_update,
9838 +                       .final = ahash_final,
9839 +                       .finup = ahash_finup,
9840 +                       .digest = ahash_digest,
9841 +                       .export = ahash_export,
9842 +                       .import = ahash_import,
9843 +                       .setkey = ahash_setkey,
9844 +                       .halg = {
9845 +                               .digestsize = SHA256_DIGEST_SIZE,
9846 +                               .statesize = sizeof(struct caam_export_state),
9847 +                       },
9848 +               },
9849 +               .alg_type = OP_ALG_ALGSEL_SHA256,
9850 +       }, {
9851 +               .name = "sha384",
9852 +               .driver_name = "sha384-caam-qi2",
9853 +               .hmac_name = "hmac(sha384)",
9854 +               .hmac_driver_name = "hmac-sha384-caam-qi2",
9855 +               .blocksize = SHA384_BLOCK_SIZE,
9856 +               .template_ahash = {
9857 +                       .init = ahash_init,
9858 +                       .update = ahash_update,
9859 +                       .final = ahash_final,
9860 +                       .finup = ahash_finup,
9861 +                       .digest = ahash_digest,
9862 +                       .export = ahash_export,
9863 +                       .import = ahash_import,
9864 +                       .setkey = ahash_setkey,
9865 +                       .halg = {
9866 +                               .digestsize = SHA384_DIGEST_SIZE,
9867 +                               .statesize = sizeof(struct caam_export_state),
9868 +                       },
9869 +               },
9870 +               .alg_type = OP_ALG_ALGSEL_SHA384,
9871 +       }, {
9872 +               .name = "sha512",
9873 +               .driver_name = "sha512-caam-qi2",
9874 +               .hmac_name = "hmac(sha512)",
9875 +               .hmac_driver_name = "hmac-sha512-caam-qi2",
9876 +               .blocksize = SHA512_BLOCK_SIZE,
9877 +               .template_ahash = {
9878 +                       .init = ahash_init,
9879 +                       .update = ahash_update,
9880 +                       .final = ahash_final,
9881 +                       .finup = ahash_finup,
9882 +                       .digest = ahash_digest,
9883 +                       .export = ahash_export,
9884 +                       .import = ahash_import,
9885 +                       .setkey = ahash_setkey,
9886 +                       .halg = {
9887 +                               .digestsize = SHA512_DIGEST_SIZE,
9888 +                               .statesize = sizeof(struct caam_export_state),
9889 +                       },
9890 +               },
9891 +               .alg_type = OP_ALG_ALGSEL_SHA512,
9892 +       }, {
9893 +               .name = "md5",
9894 +               .driver_name = "md5-caam-qi2",
9895 +               .hmac_name = "hmac(md5)",
9896 +               .hmac_driver_name = "hmac-md5-caam-qi2",
9897 +               .blocksize = MD5_BLOCK_WORDS * 4,
9898 +               .template_ahash = {
9899 +                       .init = ahash_init,
9900 +                       .update = ahash_update,
9901 +                       .final = ahash_final,
9902 +                       .finup = ahash_finup,
9903 +                       .digest = ahash_digest,
9904 +                       .export = ahash_export,
9905 +                       .import = ahash_import,
9906 +                       .setkey = ahash_setkey,
9907 +                       .halg = {
9908 +                               .digestsize = MD5_DIGEST_SIZE,
9909 +                               .statesize = sizeof(struct caam_export_state),
9910 +                       },
9911 +               },
9912 +               .alg_type = OP_ALG_ALGSEL_MD5,
9913 +       }
9914 +};
9915 +
9916 +struct caam_hash_alg {
9917 +       struct list_head entry;
9918 +       struct device *dev;
9919 +       int alg_type;
9920 +       struct ahash_alg ahash_alg;
9921 +};
9922 +
9923 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
9924 +{
9925 +       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
9926 +       struct crypto_alg *base = tfm->__crt_alg;
9927 +       struct hash_alg_common *halg =
9928 +                container_of(base, struct hash_alg_common, base);
9929 +       struct ahash_alg *alg =
9930 +                container_of(halg, struct ahash_alg, halg);
9931 +       struct caam_hash_alg *caam_hash =
9932 +                container_of(alg, struct caam_hash_alg, ahash_alg);
9933 +       struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9934 +       /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
9935 +       static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
9936 +                                        HASH_MSG_LEN + SHA1_DIGEST_SIZE,
9937 +                                        HASH_MSG_LEN + 32, /* SHA-224 */
9938 +                                        HASH_MSG_LEN + SHA256_DIGEST_SIZE,
9939 +                                        HASH_MSG_LEN + 64, /* SHA-384 */
9940 +                                        HASH_MSG_LEN + SHA512_DIGEST_SIZE };
9941 +       dma_addr_t dma_addr;
9942 +       int i;
9943 +
9944 +       ctx->dev = caam_hash->dev;
9945 +
9946 +       dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
9947 +                                       DMA_BIDIRECTIONAL,
9948 +                                       DMA_ATTR_SKIP_CPU_SYNC);
9949 +       if (dma_mapping_error(ctx->dev, dma_addr)) {
9950 +               dev_err(ctx->dev, "unable to map shared descriptors\n");
9951 +               return -ENOMEM;
9952 +       }
9953 +
9954 +       for (i = 0; i < HASH_NUM_OP; i++)
9955 +               ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
9956 +
9957 +       /* copy descriptor header template value */
9958 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
9959 +
9960 +       ctx->ctx_len = runninglen[(ctx->adata.algtype &
9961 +                                  OP_ALG_ALGSEL_SUBMASK) >>
9962 +                                 OP_ALG_ALGSEL_SHIFT];
9963 +
9964 +       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
9965 +                                sizeof(struct caam_hash_state));
9966 +
9967 +       return ahash_set_sh_desc(ahash);
9968 +}
9969 +
9970 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
9971 +{
9972 +       struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9973 +
9974 +       dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
9975 +                              DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
9976 +}
9977 +
9978 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
9979 +       struct caam_hash_template *template, bool keyed)
9980 +{
9981 +       struct caam_hash_alg *t_alg;
9982 +       struct ahash_alg *halg;
9983 +       struct crypto_alg *alg;
9984 +
9985 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
9986 +       if (!t_alg)
9987 +               return ERR_PTR(-ENOMEM);
9988 +
9989 +       t_alg->ahash_alg = template->template_ahash;
9990 +       halg = &t_alg->ahash_alg;
9991 +       alg = &halg->halg.base;
9992 +
9993 +       if (keyed) {
9994 +               snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
9995 +                        template->hmac_name);
9996 +               snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
9997 +                        template->hmac_driver_name);
9998 +       } else {
9999 +               snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
10000 +                        template->name);
10001 +               snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
10002 +                        template->driver_name);
10003 +               t_alg->ahash_alg.setkey = NULL;
10004 +       }
10005 +       alg->cra_module = THIS_MODULE;
10006 +       alg->cra_init = caam_hash_cra_init;
10007 +       alg->cra_exit = caam_hash_cra_exit;
10008 +       alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
10009 +       alg->cra_priority = CAAM_CRA_PRIORITY;
10010 +       alg->cra_blocksize = template->blocksize;
10011 +       alg->cra_alignmask = 0;
10012 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
10013 +       alg->cra_type = &crypto_ahash_type;
10014 +
10015 +       t_alg->alg_type = template->alg_type;
10016 +       t_alg->dev = dev;
10017 +
10018 +       return t_alg;
10019 +}
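One template thus yields both the keyed and the unkeyed registration. A sketch with hypothetical template field values:

/* Illustration only: given a template with .name = "sha256" and
 * .hmac_name = "hmac(sha256)",
 *   caam_hash_alloc(dev, tmpl, true)  -> cra_name "hmac(sha256)", setkey kept
 *   caam_hash_alloc(dev, tmpl, false) -> cra_name "sha256", setkey forced to
 *                                        NULL so the unkeyed hash rejects keys
 */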
10020 +
10021 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
10022 +{
10023 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10024 +
10025 +       ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
10026 +       napi_schedule_irqoff(&ppriv->napi);
10027 +}
10028 +
10029 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
10030 +{
10031 +       struct device *dev = priv->dev;
10032 +       struct dpaa2_io_notification_ctx *nctx;
10033 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10034 +       int err, i = 0, cpu;
10035 +
10036 +       for_each_online_cpu(cpu) {
10037 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10038 +               ppriv->priv = priv;
10039 +               nctx = &ppriv->nctx;
10040 +               nctx->is_cdan = 0;
10041 +               nctx->id = ppriv->rsp_fqid;
10042 +               nctx->desired_cpu = cpu;
10043 +               nctx->cb = dpaa2_caam_fqdan_cb;
10044 +
10045 +               /* Register notification callbacks */
10046 +               ppriv->dpio = dpaa2_io_service_select(cpu);
10047 +               err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
10048 +               if (unlikely(err)) {
10049 +                       dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
10050 +                       nctx->cb = NULL;
10051 +                       /*
10052 +                        * If no affine DPIO for this core, there's probably
10053 +                        * none available for next cores either. Signal we want
10054 +                        * to retry later, in case the DPIO devices weren't
10055 +                        * probed yet.
10056 +                        */
10057 +                       err = -EPROBE_DEFER;
10058 +                       goto err;
10059 +               }
10060 +
10061 +               ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
10062 +                                                    dev);
10063 +               if (unlikely(!ppriv->store)) {
10064 +                       dev_err(dev, "dpaa2_io_store_create() failed\n");
10065 +                       err = -ENOMEM;
10066 +                       goto err;
10067 +               }
10068 +
10069 +               if (++i == priv->num_pairs)
10070 +                       break;
10071 +       }
10072 +
10073 +       return 0;
10074 +
10075 +err:
10076 +       for_each_online_cpu(cpu) {
10077 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10078 +               if (!ppriv->nctx.cb)
10079 +                       break;
10080 +               dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10081 +       }
10082 +
10083 +       for_each_online_cpu(cpu) {
10084 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10085 +               if (!ppriv->store)
10086 +                       break;
10087 +               dpaa2_io_store_destroy(ppriv->store);
10088 +       }
10089 +
10090 +       return err;
10091 +}
10092 +
10093 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
10094 +{
10095 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10096 +       struct device *dev = priv->dev;
10097 +       int i = 0, cpu;
10098 +
10099 +       for_each_online_cpu(cpu) {
10100 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10101 +               dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10102 +               dpaa2_io_store_destroy(ppriv->store);
10103 +
10104 +               if (++i == priv->num_pairs)
10105 +                       return;
10106 +       }
10107 +}
10108 +
10109 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
10110 +{
10111 +       struct dpseci_rx_queue_cfg rx_queue_cfg;
10112 +       struct device *dev = priv->dev;
10113 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10114 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10115 +       int err = 0, i = 0, cpu;
10116 +
10117 +       /* Configure Rx queues */
10118 +       for_each_online_cpu(cpu) {
10119 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10120 +
10121 +               rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
10122 +                                      DPSECI_QUEUE_OPT_USER_CTX;
10123 +               rx_queue_cfg.order_preservation_en = 0;
10124 +               rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
10125 +               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
10126 +               /*
10127 +                * Rx priority (WQ) doesn't really matter, since we use
10128 +                * pull mode, i.e. volatile dequeues from specific FQs
10129 +                */
10130 +               rx_queue_cfg.dest_cfg.priority = 0;
10131 +               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
10132 +
10133 +               err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10134 +                                         &rx_queue_cfg);
10135 +               if (err) {
10136 +                       dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
10137 +                               err);
10138 +                       return err;
10139 +               }
10140 +
10141 +               if (++i == priv->num_pairs)
10142 +                       break;
10143 +       }
10144 +
10145 +       return err;
10146 +}
10147 +
10148 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
10149 +{
10150 +       struct device *dev = priv->dev;
10151 +
10152 +       if (!priv->cscn_mem)
10153 +               return;
10154 +
10155 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10156 +       kfree(priv->cscn_mem);
10157 +}
10158 +
10159 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
10160 +{
10161 +       struct device *dev = priv->dev;
10162 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10163 +
10164 +       dpaa2_dpseci_congestion_free(priv);
10165 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10166 +}
10167 +
10168 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
10169 +                                 const struct dpaa2_fd *fd)
10170 +{
10171 +       struct caam_request *req;
10172 +       u32 fd_err;
10173 +
10174 +       if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
10175 +               dev_err(priv->dev, "Only Frame List FD format is supported!\n");
10176 +               return;
10177 +       }
10178 +
10179 +       fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
10180 +       if (unlikely(fd_err))
10181 +               dev_err(priv->dev, "FD error: %08x\n", fd_err);
10182 +
10183 +       /*
10184 +        * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
10185 +        * in FD[ERR] or FD[FRC].
10186 +        */
10187 +       req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
10188 +       dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
10189 +                        DMA_BIDIRECTIONAL);
10190 +       req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
10191 +}
10192 +
10193 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
10194 +{
10195 +       int err;
10196 +
10197 +       /* Retry while portal is busy */
10198 +       do {
10199 +               err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
10200 +                                              ppriv->store);
10201 +       } while (err == -EBUSY);
10202 +
10203 +       if (unlikely(err))
10204 +               dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
10205 +
10206 +       return err;
10207 +}
10208 +
10209 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
10210 +{
10211 +       struct dpaa2_dq *dq;
10212 +       int cleaned = 0, is_last;
10213 +
10214 +       do {
10215 +               dq = dpaa2_io_store_next(ppriv->store, &is_last);
10216 +               if (unlikely(!dq)) {
10217 +                       if (unlikely(!is_last)) {
10218 +                               dev_dbg(ppriv->priv->dev,
10219 +                                       "FQ %d returned no valid frames\n",
10220 +                                       ppriv->rsp_fqid);
10221 +                               /*
10222 +                                * MUST retry until we get some sort of
10223 +                                * valid response token (be it "empty dequeue"
10224 +                                * or a valid frame).
10225 +                                */
10226 +                               continue;
10227 +                       }
10228 +                       break;
10229 +               }
10230 +
10231 +               /* Process FD */
10232 +               dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
10233 +               cleaned++;
10234 +       } while (!is_last);
10235 +
10236 +       return cleaned;
10237 +}
10238 +
10239 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
10240 +{
10241 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10242 +       struct dpaa2_caam_priv *priv;
10243 +       int err, cleaned = 0, store_cleaned;
10244 +
10245 +       ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
10246 +       priv = ppriv->priv;
10247 +
10248 +       if (unlikely(dpaa2_caam_pull_fq(ppriv)))
10249 +               return 0;
10250 +
10251 +       do {
10252 +               store_cleaned = dpaa2_caam_store_consume(ppriv);
10253 +               cleaned += store_cleaned;
10254 +
10255 +               if (store_cleaned == 0 ||
10256 +                   cleaned > budget - DPAA2_CAAM_STORE_SIZE)
10257 +                       break;
10258 +
10259 +               /* Try to dequeue some more */
10260 +               err = dpaa2_caam_pull_fq(ppriv);
10261 +               if (unlikely(err))
10262 +                       break;
10263 +       } while (1);
10264 +
10265 +       if (cleaned < budget) {
10266 +               napi_complete_done(napi, cleaned);
10267 +               err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
10268 +               if (unlikely(err))
10269 +                       dev_err(priv->dev, "Notification rearm failed: %d\n",
10270 +                               err);
10271 +       }
10272 +
10273 +       return cleaned;
10274 +}
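The budget bound above is worth a worked example, using the defaults from caamalg_qi2.h:

/* budget = DPAA2_CAAM_NAPI_WEIGHT = 64, DPAA2_CAAM_STORE_SIZE = 16.
 * A further pull is issued only while cleaned <= 64 - 16 = 48, so even a
 * completely full final store (16 more frames) cannot push cleaned past
 * the budget -- which is why the NAPI weight must be a multiple of the
 * store size.
 */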
10275 +
10276 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
10277 +                                        u16 token)
10278 +{
10279 +       struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
10280 +       struct device *dev = priv->dev;
10281 +       int err;
10282 +
10283 +       /*
10284 +        * Congestion group feature supported starting with DPSECI API v5.1
10285 +        * and only when object has been created with this capability.
10286 +        */
10287 +       if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
10288 +           !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
10289 +               return 0;
10290 +
10291 +       priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
10292 +                                GFP_KERNEL | GFP_DMA);
10293 +       if (!priv->cscn_mem)
10294 +               return -ENOMEM;
10295 +
10296 +       priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
10297 +       priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
10298 +                                       DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10299 +       if (dma_mapping_error(dev, priv->cscn_dma)) {
10300 +               dev_err(dev, "Error mapping CSCN memory area\n");
10301 +               err = -ENOMEM;
10302 +               goto err_dma_map;
10303 +       }
10304 +
10305 +       cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
10306 +       cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
10307 +       cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
10308 +       cong_notif_cfg.message_ctx = (u64)(uintptr_t)priv;
10309 +       cong_notif_cfg.message_iova = priv->cscn_dma;
10310 +       cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
10311 +                                       DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
10312 +                                       DPSECI_CGN_MODE_COHERENT_WRITE;
10313 +
10314 +       err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
10315 +                                                &cong_notif_cfg);
10316 +       if (err) {
10317 +               dev_err(dev, "dpseci_set_congestion_notification failed\n");
10318 +               goto err_set_cong;
10319 +       }
10320 +
10321 +       return 0;
10322 +
10323 +err_set_cong:
10324 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10325 +err_dma_map:
10326 +       kfree(priv->cscn_mem);
10327 +
10328 +       return err;
10329 +}
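The version gate above relies on DPSECI_VER() packing (major, minor) into a single comparable integer; the macro is defined elsewhere in this patch, so the shape below is an assumption for illustration:

/* Assumed packing: #define DPSECI_VER(maj, min) (((maj) << 16) | (min))
 * giving DPSECI_VER(5, 0) < DPSECI_VER(5, 1) < DPSECI_VER(6, 0), so the
 * congestion setup quietly becomes a no-op on pre-5.1 MC firmware or when
 * the DPSECI object was created without DPSECI_OPT_HAS_CG.
 */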
10330 +
10331 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
10332 +{
10333 +       struct device *dev = &ls_dev->dev;
10334 +       struct dpaa2_caam_priv *priv;
10335 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10336 +       int err, cpu;
10337 +       u8 i;
10338 +
10339 +       priv = dev_get_drvdata(dev);
10340 +
10341 +       priv->dev = dev;
10342 +       priv->dpsec_id = ls_dev->obj_desc.id;
10343 +
10344 +       /* Get a handle for the DPSECI this interface is associated with */
10345 +       err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
10346 +       if (err) {
10347 +               dev_err(dev, "dpsec_open() failed: %d\n", err);
10348 +               goto err_open;
10349 +       }
10350 +
10351 +       dev_info(dev, "Opened dpseci object successfully\n");
10352 +
10353 +       err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
10354 +                                    &priv->minor_ver);
10355 +       if (err) {
10356 +               dev_err(dev, "dpseci_get_api_version() failed\n");
10357 +               goto err_get_vers;
10358 +       }
10359 +
10360 +       err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
10361 +                                   &priv->dpseci_attr);
10362 +       if (err) {
10363 +               dev_err(dev, "dpseci_get_attributes() failed\n");
10364 +               goto err_get_vers;
10365 +       }
10366 +
10367 +       err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
10368 +                                 &priv->sec_attr);
10369 +       if (err) {
10370 +               dev_err(dev, "dpseci_get_sec_attr() failed\n");
10371 +               goto err_get_vers;
10372 +       }
10373 +
10374 +       err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
10375 +       if (err) {
10376 +               dev_err(dev, "setup_congestion() failed\n");
10377 +               goto err_get_vers;
10378 +       }
10379 +
10380 +       priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
10381 +                             priv->dpseci_attr.num_tx_queues);
10382 +       if (priv->num_pairs > num_online_cpus()) {
10383 +               dev_warn(dev, "%d queues won't be used\n",
10384 +                        priv->num_pairs - num_online_cpus());
10385 +               priv->num_pairs = num_online_cpus();
10386 +       }
10387 +
10388 +       for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
10389 +               err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10390 +                                         &priv->rx_queue_attr[i]);
10391 +               if (err) {
10392 +                       dev_err(dev, "dpseci_get_rx_queue() failed\n");
10393 +                       goto err_get_rx_queue;
10394 +               }
10395 +       }
10396 +
10397 +       for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
10398 +               err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10399 +                                         &priv->tx_queue_attr[i]);
10400 +               if (err) {
10401 +                       dev_err(dev, "dpseci_get_tx_queue() failed\n");
10402 +                       goto err_get_rx_queue;
10403 +               }
10404 +       }
10405 +
10406 +       i = 0;
10407 +       for_each_online_cpu(cpu) {
10408 +               u8 j;
10409 +
10410 +               j = i % priv->num_pairs;
10411 +
10412 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
10413 +               ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
10414 +
10415 +               /*
10416 +                * Allow all cores to enqueue, while only some of them
10417 +                * will take part in dequeuing.
10418 +                */
10419 +               if (++i > priv->num_pairs)
10420 +                       continue;
10421 +
10422 +               ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
10423 +               ppriv->prio = j;
10424 +
10425 +               dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
10426 +                        priv->rx_queue_attr[j].fqid,
10427 +                        priv->tx_queue_attr[j].fqid);
10428 +
10429 +               ppriv->net_dev.dev = *dev;
10430 +               INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
10431 +               netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
10432 +                              DPAA2_CAAM_NAPI_WEIGHT);
10433 +       }
10434 +
10435 +       return 0;
10436 +
10437 +err_get_rx_queue:
10438 +       dpaa2_dpseci_congestion_free(priv);
10439 +err_get_vers:
10440 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10441 +err_open:
10442 +       return err;
10443 +}
10444 +
10445 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
10446 +{
10447 +       struct device *dev = priv->dev;
10448 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10449 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10450 +       int err, i;
10451 +
10452 +       for (i = 0; i < priv->num_pairs; i++) {
10453 +               ppriv = per_cpu_ptr(priv->ppriv, i);
10454 +               napi_enable(&ppriv->napi);
10455 +       }
10456 +
10457 +       err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
10458 +       if (err) {
10459 +               dev_err(dev, "dpseci_enable() failed\n");
10460 +               return err;
10461 +       }
10462 +
10463 +       dev_info(dev, "DPSECI version %d.%d\n",
10464 +                priv->major_ver,
10465 +                priv->minor_ver);
10466 +
10467 +       return 0;
10468 +}
10469 +
10470 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
10471 +{
10472 +       struct device *dev = priv->dev;
10473 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10474 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10475 +       int i, err = 0, enabled;
10476 +
10477 +       err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
10478 +       if (err) {
10479 +               dev_err(dev, "dpseci_disable() failed\n");
10480 +               return err;
10481 +       }
10482 +
10483 +       err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
10484 +       if (err) {
10485 +               dev_err(dev, "dpseci_is_enabled() failed\n");
10486 +               return err;
10487 +       }
10488 +
10489 +       dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
10490 +
10491 +       for (i = 0; i < priv->num_pairs; i++) {
10492 +               ppriv = per_cpu_ptr(priv->ppriv, i);
10493 +               napi_disable(&ppriv->napi);
10494 +               netif_napi_del(&ppriv->napi);
10495 +       }
10496 +
10497 +       return 0;
10498 +}
10499 +
10500 +static struct list_head hash_list;
10501 +
10502 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
10503 +{
10504 +       struct device *dev;
10505 +       struct dpaa2_caam_priv *priv;
10506 +       int i, err = 0;
10507 +       bool registered = false;
10508 +
10509 +       /*
10510 +        * There is no way to get CAAM endianness - there is no direct register
10511 +        * space access and MC f/w does not provide this attribute.
10512 +        * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
10513 +        * property.
10514 +        */
10515 +       caam_little_end = true;
10516 +
10517 +       caam_imx = false;
10518 +
10519 +       dev = &dpseci_dev->dev;
10520 +
10521 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
10522 +       if (!priv)
10523 +               return -ENOMEM;
10524 +
10525 +       dev_set_drvdata(dev, priv);
10526 +
10527 +       priv->domain = iommu_get_domain_for_dev(dev);
10528 +
10529 +       qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
10530 +                                    0, SLAB_CACHE_DMA, NULL);
10531 +       if (!qi_cache) {
10532 +               dev_err(dev, "Can't allocate SEC cache\n");
10533 +               err = -ENOMEM;
10534 +               goto err_qicache;
10535 +       }
10536 +
10537 +       err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
10538 +       if (err) {
10539 +               dev_err(dev, "dma_set_mask_and_coherent() failed\n");
10540 +               goto err_dma_mask;
10541 +       }
10542 +
10543 +       /* Obtain a MC portal */
10544 +       err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
10545 +       if (err) {
10546 +               if (err == -ENXIO)
10547 +                       err = -EPROBE_DEFER;
10548 +               else
10549 +                       dev_err(dev, "MC portal allocation failed\n");
10550 +
10551 +               goto err_dma_mask;
10552 +       }
10553 +
10554 +       priv->ppriv = alloc_percpu(*priv->ppriv);
10555 +       if (!priv->ppriv) {
10556 +               dev_err(dev, "alloc_percpu() failed\n");
10557 +               err = -ENOMEM;
10558 +               goto err_alloc_ppriv;
10559 +       }
10560 +
10561 +       /* DPSECI initialization */
10562 +       err = dpaa2_dpseci_setup(dpseci_dev);
10563 +       if (err) {
10564 +               dev_err(dev, "dpaa2_dpseci_setup() failed\n");
10565 +               goto err_dpseci_setup;
10566 +       }
10567 +
10568 +       /* DPIO */
10569 +       err = dpaa2_dpseci_dpio_setup(priv);
10570 +       if (err) {
10571 +               if (err != -EPROBE_DEFER)
10572 +                       dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
10573 +               goto err_dpio_setup;
10574 +       }
10575 +
10576 +       /* DPSECI binding to DPIO */
10577 +       err = dpaa2_dpseci_bind(priv);
10578 +       if (err) {
10579 +               dev_err(dev, "dpaa2_dpseci_bind() failed\n");
10580 +               goto err_bind;
10581 +       }
10582 +
10583 +       /* DPSECI enable */
10584 +       err = dpaa2_dpseci_enable(priv);
10585 +       if (err) {
10586 +               dev_err(dev, "dpaa2_dpseci_enable() failed\n");
10587 +               goto err_bind;
10588 +       }
10589 +
10590 +       /* register crypto algorithms the device supports */
10591 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10592 +               struct caam_skcipher_alg *t_alg = driver_algs + i;
10593 +               u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
10594 +
10595 +               /* Skip DES algorithms if not supported by device */
10596 +               if (!priv->sec_attr.des_acc_num &&
10597 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
10598 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
10599 +                       continue;
10600 +
10601 +               /* Skip AES algorithms if not supported by device */
10602 +               if (!priv->sec_attr.aes_acc_num &&
10603 +                   (alg_sel == OP_ALG_ALGSEL_AES))
10604 +                       continue;
10605 +
10606 +               /* Skip CHACHA20 algorithms if not supported by device */
10607 +               if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10608 +                   !priv->sec_attr.ccha_acc_num)
10609 +                       continue;
10610 +
10611 +               t_alg->caam.dev = dev;
10612 +               caam_skcipher_alg_init(t_alg);
10613 +
10614 +               err = crypto_register_skcipher(&t_alg->skcipher);
10615 +               if (err) {
10616 +                       dev_warn(dev, "%s alg registration failed: %d\n",
10617 +                                t_alg->skcipher.base.cra_driver_name, err);
10618 +                       continue;
10619 +               }
10620 +
10621 +               t_alg->registered = true;
10622 +               registered = true;
10623 +       }
10624 +
10625 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10626 +               struct caam_aead_alg *t_alg = driver_aeads + i;
10627 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
10628 +                                OP_ALG_ALGSEL_MASK;
10629 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
10630 +                                OP_ALG_ALGSEL_MASK;
10631 +
10632 +               /* Skip DES algorithms if not supported by device */
10633 +               if (!priv->sec_attr.des_acc_num &&
10634 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
10635 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
10636 +                       continue;
10637 +
10638 +               /* Skip AES algorithms if not supported by device */
10639 +               if (!priv->sec_attr.aes_acc_num &&
10640 +                   (c1_alg_sel == OP_ALG_ALGSEL_AES))
10641 +                       continue;
10642 +
10643 +               /* Skip CHACHA20 algorithms if not supported by device */
10644 +               if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10645 +                   !priv->sec_attr.ccha_acc_num)
10646 +                       continue;
10647 +
10648 +               /* Skip POLY1305 algorithms if not supported by device */
10649 +               if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
10650 +                   !priv->sec_attr.ptha_acc_num)
10651 +                       continue;
10652 +
10653 +               /*
10654 +                * Skip algorithms requiring message digests
10655 +                * if MD not supported by device.
10656 +                */
10657 +               if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == OP_ALG_ALGSEL_MD5 &&
10658 +                   !priv->sec_attr.md_acc_num)
10659 +                       continue;
10660 +
10661 +               t_alg->caam.dev = dev;
10662 +               caam_aead_alg_init(t_alg);
10663 +
10664 +               err = crypto_register_aead(&t_alg->aead);
10665 +               if (err) {
10666 +                       dev_warn(dev, "%s alg registration failed: %d\n",
10667 +                                t_alg->aead.base.cra_driver_name, err);
10668 +                       continue;
10669 +               }
10670 +
10671 +               t_alg->registered = true;
10672 +               registered = true;
10673 +       }
10674 +       if (registered)
10675 +               dev_info(dev, "algorithms registered in /proc/crypto\n");
10676 +
10677 +       /* register hash algorithms the device supports */
10678 +       INIT_LIST_HEAD(&hash_list);
10679 +
10680 +       /*
10681 +        * Skip registration of any hashing algorithms if MD block
10682 +        * is not present.
10683 +        */
10684 +       if (!priv->sec_attr.md_acc_num)
10685 +               return 0;
10686 +
10687 +       for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
10688 +               struct caam_hash_alg *t_alg;
10689 +               struct caam_hash_template *alg = driver_hash + i;
10690 +
10691 +               /* register hmac version */
10692 +               t_alg = caam_hash_alloc(dev, alg, true);
10693 +               if (IS_ERR(t_alg)) {
10694 +                       err = PTR_ERR(t_alg);
10695 +                       dev_warn(dev, "%s hash alg allocation failed: %d\n",
10696 +                                alg->driver_name, err);
10697 +                       continue;
10698 +               }
10699 +
10700 +               err = crypto_register_ahash(&t_alg->ahash_alg);
10701 +               if (err) {
10702 +                       dev_warn(dev, "%s alg registration failed: %d\n",
10703 +                                t_alg->ahash_alg.halg.base.cra_driver_name,
10704 +                                err);
10705 +                       kfree(t_alg);
10706 +               } else {
10707 +                       list_add_tail(&t_alg->entry, &hash_list);
10708 +               }
10709 +
10710 +               /* register unkeyed version */
10711 +               t_alg = caam_hash_alloc(dev, alg, false);
10712 +               if (IS_ERR(t_alg)) {
10713 +                       err = PTR_ERR(t_alg);
10714 +                       dev_warn(dev, "%s alg allocation failed: %d\n",
10715 +                                alg->driver_name, err);
10716 +                       continue;
10717 +               }
10718 +
10719 +               err = crypto_register_ahash(&t_alg->ahash_alg);
10720 +               if (err) {
10721 +                       dev_warn(dev, "%s alg registration failed: %d\n",
10722 +                                t_alg->ahash_alg.halg.base.cra_driver_name,
10723 +                                err);
10724 +                       kfree(t_alg);
10725 +               } else {
10726 +                       list_add_tail(&t_alg->entry, &hash_list);
10727 +               }
10728 +       }
10729 +       if (!list_empty(&hash_list))
10730 +               dev_info(dev, "hash algorithms registered in /proc/crypto\n");
10731 +
10732 +       return err;
10733 +
10734 +err_bind:
10735 +       dpaa2_dpseci_dpio_free(priv);
10736 +err_dpio_setup:
10737 +       dpaa2_dpseci_free(priv);
10738 +err_dpseci_setup:
10739 +       free_percpu(priv->ppriv);
10740 +err_alloc_ppriv:
10741 +       fsl_mc_portal_free(priv->mc_io);
10742 +err_dma_mask:
10743 +       kmem_cache_destroy(qi_cache);
10744 +err_qicache:
10745 +       dev_set_drvdata(dev, NULL);
10746 +
10747 +       return err;
10748 +}
10749 +
10750 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
10751 +{
10752 +       struct device *dev;
10753 +       struct dpaa2_caam_priv *priv;
10754 +       int i;
10755 +
10756 +       dev = &ls_dev->dev;
10757 +       priv = dev_get_drvdata(dev);
10758 +
10759 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10760 +               struct caam_aead_alg *t_alg = driver_aeads + i;
10761 +
10762 +               if (t_alg->registered)
10763 +                       crypto_unregister_aead(&t_alg->aead);
10764 +       }
10765 +
10766 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10767 +               struct caam_skcipher_alg *t_alg = driver_algs + i;
10768 +
10769 +               if (t_alg->registered)
10770 +                       crypto_unregister_skcipher(&t_alg->skcipher);
10771 +       }
10772 +
10773 +       if (hash_list.next) {
10774 +               struct caam_hash_alg *t_hash_alg, *p;
10775 +
10776 +               list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
10777 +                       crypto_unregister_ahash(&t_hash_alg->ahash_alg);
10778 +                       list_del(&t_hash_alg->entry);
10779 +                       kfree(t_hash_alg);
10780 +               }
10781 +       }
10782 +
10783 +       dpaa2_dpseci_disable(priv);
10784 +       dpaa2_dpseci_dpio_free(priv);
10785 +       dpaa2_dpseci_free(priv);
10786 +       free_percpu(priv->ppriv);
10787 +       fsl_mc_portal_free(priv->mc_io);
10788 +       dev_set_drvdata(dev, NULL);
10789 +       kmem_cache_destroy(qi_cache);
10790 +
10791 +       return 0;
10792 +}
10793 +
10794 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
10795 +{
10796 +       struct dpaa2_fd fd;
10797 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
10798 +       struct dpaa2_caam_priv_per_cpu *ppriv;
10799 +       int err = 0, i;
10800 +
10801 +       if (IS_ERR(req))
10802 +               return PTR_ERR(req);
10803 +
10804 +       if (priv->cscn_mem) {
10805 +               dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
10806 +                                       DPAA2_CSCN_SIZE,
10807 +                                       DMA_FROM_DEVICE);
10808 +               if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
10809 +                       dev_dbg_ratelimited(dev, "Dropping request\n");
10810 +                       return -EBUSY;
10811 +               }
10812 +       }
10813 +
10814 +       dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
10815 +
10816 +       req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
10817 +                                        DMA_BIDIRECTIONAL);
10818 +       if (dma_mapping_error(dev, req->fd_flt_dma)) {
10819 +               dev_err(dev, "DMA mapping error for QI enqueue request\n");
10820 +               return -EIO;
10821 +       }
10822 +
10823 +       memset(&fd, 0, sizeof(fd));
10824 +       dpaa2_fd_set_format(&fd, dpaa2_fd_list);
10825 +       dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
10826 +       dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
10827 +       dpaa2_fd_set_flc(&fd, req->flc_dma);
10828 +
10829 +       ppriv = this_cpu_ptr(priv->ppriv);
10830 +       for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
10831 +               err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
10832 +                                                 &fd);
10833 +               if (err != -EBUSY)
10834 +                       break;
10835 +
10836 +               cpu_relax();
10837 +       }
10838 +
10839 +       if (unlikely(err)) {
10840 +               dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
10841 +               goto err_out;
10842 +       }
10843 +
10844 +       return -EINPROGRESS;
10845 +
10846 +err_out:
10847 +       dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
10848 +                        DMA_BIDIRECTIONAL);
10849 +       return -EIO;
10850 +}
10851 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
10852 +
10853 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
10854 +       {
10855 +               .vendor = FSL_MC_VENDOR_FREESCALE,
10856 +               .obj_type = "dpseci",
10857 +       },
10858 +       { .vendor = 0x0 }
10859 +};
10860 +
10861 +static struct fsl_mc_driver dpaa2_caam_driver = {
10862 +       .driver = {
10863 +               .name           = KBUILD_MODNAME,
10864 +               .owner          = THIS_MODULE,
10865 +       },
10866 +       .probe          = dpaa2_caam_probe,
10867 +       .remove         = dpaa2_caam_remove,
10868 +       .match_id_table = dpaa2_caam_match_id_table
10869 +};
10870 +
10871 +MODULE_LICENSE("Dual BSD/GPL");
10872 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
10873 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
10874 +
10875 +module_fsl_mc_driver(dpaa2_caam_driver);
10876 --- /dev/null
10877 +++ b/drivers/crypto/caam/caamalg_qi2.h
10878 @@ -0,0 +1,276 @@
10879 +/*
10880 + * Copyright 2015-2016 Freescale Semiconductor Inc.
10881 + * Copyright 2017 NXP
10882 + *
10883 + * Redistribution and use in source and binary forms, with or without
10884 + * modification, are permitted provided that the following conditions are met:
10885 + *     * Redistributions of source code must retain the above copyright
10886 + *      notice, this list of conditions and the following disclaimer.
10887 + *     * Redistributions in binary form must reproduce the above copyright
10888 + *      notice, this list of conditions and the following disclaimer in the
10889 + *      documentation and/or other materials provided with the distribution.
10890 + *     * Neither the names of the above-listed copyright holders nor the
10891 + *      names of any contributors may be used to endorse or promote products
10892 + *      derived from this software without specific prior written permission.
10893 + *
10894 + *
10895 + * ALTERNATIVELY, this software may be distributed under the terms of the
10896 + * GNU General Public License ("GPL") as published by the Free Software
10897 + * Foundation, either version 2 of that License or (at your option) any
10898 + * later version.
10899 + *
10900 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10901 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10902 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10903 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10904 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10905 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10906 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10907 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10908 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10909 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10910 + * POSSIBILITY OF SUCH DAMAGE.
10911 + */
10912 +
10913 +#ifndef _CAAMALG_QI2_H_
10914 +#define _CAAMALG_QI2_H_
10915 +
10916 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
10917 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
10918 +#include <linux/threads.h>
10919 +#include "dpseci.h"
10920 +#include "desc_constr.h"
10921 +
10922 +#define DPAA2_CAAM_STORE_SIZE  16
10923 +/* NAPI weight *must* be a multiple of the store size. */
10924 +#define DPAA2_CAAM_NAPI_WEIGHT 64
10925 +
10926 +/* The congestion entrance threshold was chosen so that on LS2088
10927 + * we support the maximum throughput for the available memory
10928 + */
10929 +#define DPAA2_SEC_CONG_ENTRY_THRESH    (128 * 1024 * 1024)
10930 +#define DPAA2_SEC_CONG_EXIT_THRESH     (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
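Worked numbers for the two thresholds:

/* entry = 128 MiB of in-flight payload,
 * exit  = 128 MiB * 9 / 10 ~= 115.2 MiB,
 * so roughly 12.8 MiB of hysteresis keeps the CSCN state from flapping
 * when the backlog hovers around the threshold.
 */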
10931 +
10932 +/**
10933 + * dpaa2_caam_priv - driver private data
10934 + * @dpsec_id: DPSECI object unique ID
10935 + * @major_ver: DPSECI major version
10936 + * @minor_ver: DPSECI minor version
10937 + * @dpseci_attr: DPSECI attributes
10938 + * @sec_attr: SEC engine attributes
10939 + * @rx_queue_attr: array of Rx queue attributes
10940 + * @tx_queue_attr: array of Tx queue attributes
10941 + * @cscn_mem: pointer to memory region containing the
10942 + *     dpaa2_cscn struct; its size is larger than
10943 + *     sizeof(struct dpaa2_cscn) to accommodate alignment
10944 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
10945 + *     as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
10946 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
10947 + * @dev: device associated with the DPSECI object
10948 + * @mc_io: pointer to MC portal's I/O object
10949 + * @domain: IOMMU domain
10950 + * @ppriv: per CPU pointers to private data
10951 + */
10952 +struct dpaa2_caam_priv {
10953 +       int dpsec_id;
10954 +
10955 +       u16 major_ver;
10956 +       u16 minor_ver;
10957 +
10958 +       struct dpseci_attr dpseci_attr;
10959 +       struct dpseci_sec_attr sec_attr;
10960 +       struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10961 +       struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10962 +       int num_pairs;
10963 +
10964 +       /* congestion */
10965 +       void *cscn_mem;
10966 +       void *cscn_mem_aligned;
10967 +       dma_addr_t cscn_dma;
10968 +
10969 +       struct device *dev;
10970 +       struct fsl_mc_io *mc_io;
10971 +       struct iommu_domain *domain;
10972 +
10973 +       struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
10974 +};
10975 +
10976 +/**
10977 + * dpaa2_caam_priv_per_cpu - per CPU private data
10978 + * @napi: napi structure
10979 + * @net_dev: netdev used by napi
10980 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
10981 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
10982 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
10983 + * @nctx: notification context of response FQ
10984 + * @store: where dequeued frames are stored
10985 + * @priv: backpointer to dpaa2_caam_priv
10986 + * @dpio: portal used for data path operations
10987 + */
10988 +struct dpaa2_caam_priv_per_cpu {
10989 +       struct napi_struct napi;
10990 +       struct net_device net_dev;
10991 +       int req_fqid;
10992 +       int rsp_fqid;
10993 +       int prio;
10994 +       struct dpaa2_io_notification_ctx nctx;
10995 +       struct dpaa2_io_store *store;
10996 +       struct dpaa2_caam_priv *priv;
10997 +       struct dpaa2_io *dpio;
10998 +};
10999 +
11000 +/*
11001 + * The CAAM QI hardware constructs a job descriptor which points
11002 + * to the shared descriptor (as pointed to by context_a of the to-CAAM FQ).
11003 + * When the job descriptor is executed by DECO, the whole job
11004 + * descriptor together with the shared descriptor gets loaded into
11005 + * the DECO buffer, which is 64 words long (each word 32 bits).
11006 + *
11007 + * The job descriptor constructed by QI hardware has layout:
11008 + *
11009 + *     HEADER          (1 word)
11010 + *     Shdesc ptr      (1 or 2 words)
11011 + *     SEQ_OUT_PTR     (1 word)
11012 + *     Out ptr         (1 or 2 words)
11013 + *     Out length      (1 word)
11014 + *     SEQ_IN_PTR      (1 word)
11015 + *     In ptr          (1 or 2 words)
11016 + *     In length       (1 word)
11017 + *
11018 + * The shdesc ptr is used to fetch the shared descriptor contents
11019 + * into the DECO buffer.
11020 + *
11021 + * Apart from the shdesc contents, the total number of words that
11022 + * get loaded into the DECO buffer is 8 or 11. The remaining words
11023 + * in the DECO buffer can be used for storing the shared descriptor.
11024 + */
11025 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
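The arithmetic behind MAX_SDLEN, assuming the usual desc_constr.h definitions (CAAM_CMD_SZ = 4, CAAM_DESC_BYTES_MAX = 64 * CAAM_CMD_SZ, DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ):

/* 64-bit pointers: DESC_JOB_IO_LEN = 5*4 + 3*8 = 44 bytes, the 11 words
 *                  above, so MAX_SDLEN = (256 - 44) / 4 = 53 words.
 * 32-bit pointers: DESC_JOB_IO_LEN = 5*4 + 3*4 = 32 bytes = 8 words,
 *                  so MAX_SDLEN = (256 - 32) / 4 = 56 words.
 */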
11026 +
11027 +/* Length of a single buffer in the QI driver memory cache */
11028 +#define CAAM_QI_MEMCACHE_SIZE  512
11029 +
11030 +/*
11031 + * aead_edesc - s/w-extended aead descriptor
11032 + * @src_nents: number of segments in input scatterlist
11033 + * @dst_nents: number of segments in output scatterlist
11034 + * @iv_dma: dma address of iv for checking continuity and link table
11035 + * @qm_sg_bytes: length of dma mapped h/w link table
11036 + * @qm_sg_dma: bus physical mapped address of h/w link table
11037 + * @assoclen: associated data length, in CAAM endianness
11038 + * @assoclen_dma: bus physical mapped address of req->assoclen
11039 + * @sgt: the h/w link table, followed by IV
11040 + */
11041 +struct aead_edesc {
11042 +       int src_nents;
11043 +       int dst_nents;
11044 +       dma_addr_t iv_dma;
11045 +       int qm_sg_bytes;
11046 +       dma_addr_t qm_sg_dma;
11047 +       unsigned int assoclen;
11048 +       dma_addr_t assoclen_dma;
11049 +       struct dpaa2_sg_entry sgt[0];
11050 +};
11051 +
11052 +/*
11053 + * tls_edesc - s/w-extended tls descriptor
11054 + * @src_nents: number of segments in input scatterlist
11055 + * @dst_nents: number of segments in output scatterlist
11056 + * @iv_dma: dma address of iv for checking continuity and link table
11057 + * @qm_sg_bytes: length of dma mapped h/w link table
11058 + * @qm_sg_dma: bus physical mapped address of h/w link table
11059 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
11060 + * @dst: pointer to output scatterlist, useful for unmapping
11061 + * @sgt: the h/w link table, followed by IV
11062 + */
11063 +struct tls_edesc {
11064 +       int src_nents;
11065 +       int dst_nents;
11066 +       dma_addr_t iv_dma;
11067 +       int qm_sg_bytes;
11068 +       dma_addr_t qm_sg_dma;
11069 +       struct scatterlist tmp[2];
11070 +       struct scatterlist *dst;
11071 +       struct dpaa2_sg_entry sgt[0];
11072 +};
11073 +
11074 +/*
11075 + * skcipher_edesc - s/w-extended skcipher descriptor
11076 + * @src_nents: number of segments in input scatterlist
11077 + * @dst_nents: number of segments in output scatterlist
11078 + * @iv_dma: dma address of iv for checking continuity and link table
11079 + * @qm_sg_bytes: length of dma mapped qm_sg space
11080 + * @qm_sg_dma: I/O virtual address of h/w link table
11081 + * @sgt: the h/w link table, followed by IV
11082 + */
11083 +struct skcipher_edesc {
11084 +       int src_nents;
11085 +       int dst_nents;
11086 +       dma_addr_t iv_dma;
11087 +       int qm_sg_bytes;
11088 +       dma_addr_t qm_sg_dma;
11089 +       struct dpaa2_sg_entry sgt[0];
11090 +};
11091 +
11092 +/*
11093 + * ahash_edesc - s/w-extended ahash descriptor
11094 + * @dst_dma: I/O virtual address of req->result
11095 + * @qm_sg_dma: I/O virtual address of h/w link table
11096 + * @src_nents: number of segments in input scatterlist
11097 + * @qm_sg_bytes: length of dma mapped qm_sg space
11098 + * @sgt: pointer to h/w link table
11099 + */
11100 +struct ahash_edesc {
11101 +       dma_addr_t dst_dma;
11102 +       dma_addr_t qm_sg_dma;
11103 +       int src_nents;
11104 +       int qm_sg_bytes;
11105 +       struct dpaa2_sg_entry sgt[0];
11106 +};
11107 +
11108 +/**
11109 + * caam_flc - Flow Context (FLC)
11110 + * @flc: Flow Context options
11111 + * @sh_desc: Shared Descriptor
11112 + */
11113 +struct caam_flc {
11114 +       u32 flc[16];
11115 +       u32 sh_desc[MAX_SDLEN];
11116 +} ____cacheline_aligned;
11117 +
11118 +enum optype {
11119 +       ENCRYPT = 0,
11120 +       DECRYPT,
11121 +       NUM_OP
11122 +};
11123 +
11124 +/**
11125 + * caam_request - the request structure a driver application fills in when
11126 + *                submitting a job to the driver.
11127 + * @fd_flt: Frame list table defining input and output
11128 + *          fd_flt[0] - FLE pointing to output buffer
11129 + *          fd_flt[1] - FLE pointing to input buffer
11130 + * @fd_flt_dma: DMA address for the frame list table
11131 + * @flc: Flow Context
11132 + * @flc_dma: I/O virtual address of Flow Context
11133 + * @cbk: Callback function to invoke when job is completed
11134 + * @ctx: arbitrary context attached to the request by the application
11135 + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
11136 + */
11137 +struct caam_request {
11138 +       struct dpaa2_fl_entry fd_flt[2];
11139 +       dma_addr_t fd_flt_dma;
11140 +       struct caam_flc *flc;
11141 +       dma_addr_t flc_dma;
11142 +       void (*cbk)(void *ctx, u32 err);
11143 +       void *ctx;
11144 +       void *edesc;
11145 +};
11146 +
11147 +/**
11148 + * dpaa2_caam_enqueue() - enqueue a crypto request
11149 + * @dev: device associated with the DPSECI object
11150 + * @req: pointer to caam_request
11151 + */
11152 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
11153 +
11154 +#endif /* _CAAMALG_QI2_H_ */
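To illustrate the submission contract documented above, a minimal hypothetical caller sketch (the names my_done/done are invented; buffer mapping, flow-context construction and error handling are elided; the dpaa2_fl_set_* helpers are the ones from dpaa2-fd.h):

static void my_done(void *ctx, u32 err)
{
	/* err carries FD[FRC], as delivered by dpaa2_caam_process_fd() */
	complete(ctx);
}

	/* in the submitter, with src/dst buffers already DMA-mapped: */
	req->flc = flc;
	req->flc_dma = flc_dma;
	dpaa2_fl_set_addr(&req->fd_flt[0], dst_dma);	/* output FLE */
	dpaa2_fl_set_len(&req->fd_flt[0], dst_len);
	dpaa2_fl_set_addr(&req->fd_flt[1], src_dma);	/* input FLE */
	dpaa2_fl_set_len(&req->fd_flt[1], src_len);
	dpaa2_fl_set_final(&req->fd_flt[1], true);
	req->cbk = my_done;
	req->ctx = &done;				/* a struct completion */
	if (dpaa2_caam_enqueue(dev, req) == -EINPROGRESS)
		wait_for_completion(&done);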
11155 --- a/drivers/crypto/caam/caamhash.c
11156 +++ b/drivers/crypto/caam/caamhash.c
11157 @@ -2,6 +2,7 @@
11158   * caam - Freescale FSL CAAM support for ahash functions of crypto API
11159   *
11160   * Copyright 2011 Freescale Semiconductor, Inc.
11161 + * Copyright 2018 NXP
11162   *
11163   * Based on caamalg.c crypto API driver.
11164   *
11165 @@ -62,6 +63,7 @@
11166  #include "error.h"
11167  #include "sg_sw_sec4.h"
11168  #include "key_gen.h"
11169 +#include "caamhash_desc.h"
11170  
11171  #define CAAM_CRA_PRIORITY              3000
11172  
11173 @@ -71,14 +73,6 @@
11174  #define CAAM_MAX_HASH_BLOCK_SIZE       SHA512_BLOCK_SIZE
11175  #define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
11176  
11177 -/* length of descriptors text */
11178 -#define DESC_AHASH_BASE                        (3 * CAAM_CMD_SZ)
11179 -#define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
11180 -#define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11181 -#define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11182 -#define DESC_AHASH_FINUP_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11183 -#define DESC_AHASH_DIGEST_LEN          (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11184 -
11185  #define DESC_HASH_MAX_USED_BYTES       (DESC_AHASH_FINAL_LEN + \
11186                                          CAAM_MAX_HASH_KEY_SIZE)
11187  #define DESC_HASH_MAX_USED_LEN         (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
11188 @@ -107,6 +101,7 @@ struct caam_hash_ctx {
11189         dma_addr_t sh_desc_update_first_dma;
11190         dma_addr_t sh_desc_fin_dma;
11191         dma_addr_t sh_desc_digest_dma;
11192 +       enum dma_data_direction dir;
11193         struct device *jrdev;
11194         u8 key[CAAM_MAX_HASH_KEY_SIZE];
11195         int ctx_len;
11196 @@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
11197  }
11198  
11199  /* Map state->caam_ctx, and add it to link table */
11200 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
11201 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
11202                                      struct caam_hash_state *state, int ctx_len,
11203                                      struct sec4_sg_entry *sec4_sg, u32 flag)
11204  {
11205 @@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
11206         return 0;
11207  }
11208  
11209 -/*
11210 - * For ahash update, final and finup (import_ctx = true)
11211 - *     import context, read and write to seqout
11212 - * For ahash firsts and digest (import_ctx = false)
11213 - *     read and write to seqout
11214 - */
11215 -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
11216 -                                    struct caam_hash_ctx *ctx, bool import_ctx)
11217 -{
11218 -       u32 op = ctx->adata.algtype;
11219 -       u32 *skip_key_load;
11220 -
11221 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
11222 -
11223 -       /* Append key if it has been set; ahash update excluded */
11224 -       if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
11225 -               /* Skip key loading if already shared */
11226 -               skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11227 -                                           JUMP_COND_SHRD);
11228 -
11229 -               append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
11230 -                                 ctx->adata.keylen, CLASS_2 |
11231 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
11232 -
11233 -               set_jump_tgt_here(desc, skip_key_load);
11234 -
11235 -               op |= OP_ALG_AAI_HMAC_PRECOMP;
11236 -       }
11237 -
11238 -       /* If needed, import context from software */
11239 -       if (import_ctx)
11240 -               append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
11241 -                               LDST_SRCDST_BYTE_CONTEXT);
11242 -
11243 -       /* Class 2 operation */
11244 -       append_operation(desc, op | state | OP_ALG_ENCRYPT);
11245 -
11246 -       /*
11247 -        * Load from buf and/or src and write to req->result or state->context
11248 -        * Calculate remaining bytes to read
11249 -        */
11250 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11251 -       /* Read remaining bytes */
11252 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11253 -                            FIFOLD_TYPE_MSG | KEY_VLF);
11254 -       /* Store class2 context bytes */
11255 -       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11256 -                        LDST_SRCDST_BYTE_CONTEXT);
11257 -}
11258 -
11259  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
11260  {
11261         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11262         int digestsize = crypto_ahash_digestsize(ahash);
11263         struct device *jrdev = ctx->jrdev;
11264 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
11265         u32 *desc;
11266  
11267 +       ctx->adata.key_virt = ctx->key;
11268 +
11269         /* ahash_update shared descriptor */
11270         desc = ctx->sh_desc_update;
11271 -       ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
11272 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
11273 +                         ctx->ctx_len, true, ctrlpriv->era);
11274         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
11275 -                                  desc_bytes(desc), DMA_TO_DEVICE);
11276 +                                  desc_bytes(desc), ctx->dir);
11277  #ifdef DEBUG
11278         print_hex_dump(KERN_ERR,
11279                        "ahash update shdesc@"__stringify(__LINE__)": ",
11280 @@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
11281  
11282         /* ahash_update_first shared descriptor */
11283         desc = ctx->sh_desc_update_first;
11284 -       ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
11285 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
11286 +                         ctx->ctx_len, false, ctrlpriv->era);
11287         dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
11288 -                                  desc_bytes(desc), DMA_TO_DEVICE);
11289 +                                  desc_bytes(desc), ctx->dir);
11290  #ifdef DEBUG
11291         print_hex_dump(KERN_ERR,
11292                        "ahash update first shdesc@"__stringify(__LINE__)": ",
11293 @@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
11294  
11295         /* ahash_final shared descriptor */
11296         desc = ctx->sh_desc_fin;
11297 -       ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
11298 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
11299 +                         ctx->ctx_len, true, ctrlpriv->era);
11300         dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
11301 -                                  desc_bytes(desc), DMA_TO_DEVICE);
11302 +                                  desc_bytes(desc), ctx->dir);
11303  #ifdef DEBUG
11304         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
11305                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
11306 @@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
11307  
11308         /* ahash_digest shared descriptor */
11309         desc = ctx->sh_desc_digest;
11310 -       ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
11311 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
11312 +                         ctx->ctx_len, false, ctrlpriv->era);
11313         dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
11314 -                                  desc_bytes(desc), DMA_TO_DEVICE);
11315 +                                  desc_bytes(desc), ctx->dir);
11316  #ifdef DEBUG
11317         print_hex_dump(KERN_ERR,
11318                        "ahash digest shdesc@"__stringify(__LINE__)": ",
11319 @@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
11320         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11321         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
11322         int digestsize = crypto_ahash_digestsize(ahash);
11323 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
11324         int ret;
11325         u8 *hashed_key = NULL;
11326  
11327 @@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
11328                 key = hashed_key;
11329         }
11330  
11331 -       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
11332 -                           CAAM_MAX_HASH_KEY_SIZE);
11333 -       if (ret)
11334 -               goto bad_free_key;
11335 +       /*
11336 +        * If DKP is supported, use it in the shared descriptor to generate
11337 +        * the split key.
11338 +        */
11339 +       if (ctrlpriv->era >= 6) {
11340 +               ctx->adata.key_inline = true;
11341 +               ctx->adata.keylen = keylen;
11342 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
11343 +                                                     OP_ALG_ALGSEL_MASK);
11344  
11345 -#ifdef DEBUG
11346 -       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
11347 -                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
11348 -                      ctx->adata.keylen_pad, 1);
11349 -#endif
11350 +               if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
11351 +                       goto bad_free_key;
11352 +
11353 +               memcpy(ctx->key, key, keylen);
11354 +       } else {
11355 +               ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
11356 +                                   keylen, CAAM_MAX_HASH_KEY_SIZE);
11357 +               if (ret)
11358 +                       goto bad_free_key;
11359 +       }
11360  
11361         kfree(hashed_key);
11362         return ahash_set_sh_desc(ahash);
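
A minimal standalone sketch (not driver code) of the Era-gated key handling in the hunk above: SEC Era >= 6 keeps the plain key and lets the DKP protocol derive the split key inside the shared descriptor, while earlier Eras generate the split key up front via a job-ring job. keylen_pad is the padded split-key size the real driver obtains from split_key_len(); MAX_HASH_KEY_SIZE stands in for CAAM_MAX_HASH_KEY_SIZE.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define MAX_HASH_KEY_SIZE 128	/* stand-in for CAAM_MAX_HASH_KEY_SIZE */

struct hash_key_sketch {
	bool key_inline;	/* key embedded in the shared descriptor */
	size_t keylen;		/* plain key length */
	size_t keylen_pad;	/* derived (split) key length */
	unsigned char key[MAX_HASH_KEY_SIZE];
};

static int setkey_sketch(struct hash_key_sketch *s, const unsigned char *key,
			 size_t keylen, size_t keylen_pad, int era)
{
	if (era >= 6) {
		/* DKP path: embed the plain key; keylen_pad bytes are
		 * reserved for the derived key written back by hardware */
		if (keylen_pad > MAX_HASH_KEY_SIZE || keylen > keylen_pad)
			return -1;
		s->key_inline = true;
		s->keylen = keylen;
		s->keylen_pad = keylen_pad;
		memcpy(s->key, key, keylen);
		return 0;
	}
	/* Pre-Era-6 path: run a split-key generation job instead */
	return -1;	/* gen_split_key() equivalent omitted in this sketch */
}
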
11363 @@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
11364                 edesc->src_nents = src_nents;
11365                 edesc->sec4_sg_bytes = sec4_sg_bytes;
11366  
11367 -               ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11368 +               ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11369                                          edesc->sec4_sg, DMA_BIDIRECTIONAL);
11370                 if (ret)
11371                         goto unmap_ctx;
11372 @@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
11373         desc = edesc->hw_desc;
11374  
11375         edesc->sec4_sg_bytes = sec4_sg_bytes;
11376 -       edesc->src_nents = 0;
11377  
11378 -       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11379 +       ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11380                                  edesc->sec4_sg, DMA_TO_DEVICE);
11381         if (ret)
11382                 goto unmap_ctx;
11383 @@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
11384  
11385         edesc->src_nents = src_nents;
11386  
11387 -       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11388 +       ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11389                                  edesc->sec4_sg, DMA_TO_DEVICE);
11390         if (ret)
11391                 goto unmap_ctx;
11392 @@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
11393                 dev_err(jrdev, "unable to map dst\n");
11394                 goto unmap;
11395         }
11396 -       edesc->src_nents = 0;
11397  
11398  #ifdef DEBUG
11399         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
11400 @@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
11401  
11402                 edesc->src_nents = src_nents;
11403                 edesc->sec4_sg_bytes = sec4_sg_bytes;
11404 -               edesc->dst_dma = 0;
11405  
11406                 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
11407                 if (ret)
11408 @@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
11409                 }
11410  
11411                 edesc->src_nents = src_nents;
11412 -               edesc->dst_dma = 0;
11413  
11414                 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
11415                                           to_hash);
11416 @@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
11417                                          HASH_MSG_LEN + 64,
11418                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
11419         dma_addr_t dma_addr;
11420 +       struct caam_drv_private *priv;
11421  
11422         /*
11423          * Get a Job ring from Job Ring driver to ensure in-order
11424 @@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
11425                 return PTR_ERR(ctx->jrdev);
11426         }
11427  
11428 +       priv = dev_get_drvdata(ctx->jrdev->parent);
11429 +       ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
11430 +
11431         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
11432                                         offsetof(struct caam_hash_ctx,
11433                                                  sh_desc_update_dma),
11434 -                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11435 +                                       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11436         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
11437                 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
11438                 caam_jr_free(ctx->jrdev);
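
The same Era check also decides the DMA direction for the shared-descriptor buffer: with DKP the device writes the derived key back into the descriptor, so the mapping must be bidirectional. A one-line kernel-context sketch of the rule, assuming the usual dma-direction.h constants:

#include <linux/dma-direction.h>

/* DKP (Era >= 6) rewrites the descriptor, so map it both ways. */
static inline enum dma_data_direction shdesc_dir(int era)
{
	return era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
}
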
11439 @@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
11440         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
11441                                offsetof(struct caam_hash_ctx,
11442                                         sh_desc_update_dma),
11443 -                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11444 +                              ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11445         caam_jr_free(ctx->jrdev);
11446  }
11447  
11448 -static void __exit caam_algapi_hash_exit(void)
11449 +void caam_algapi_hash_exit(void)
11450  {
11451         struct caam_hash_alg *t_alg, *n;
11452  
11453 @@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
11454         return t_alg;
11455  }
11456  
11457 -static int __init caam_algapi_hash_init(void)
11458 +int caam_algapi_hash_init(struct device *ctrldev)
11459  {
11460 -       struct device_node *dev_node;
11461 -       struct platform_device *pdev;
11462 -       struct device *ctrldev;
11463         int i = 0, err = 0;
11464 -       struct caam_drv_private *priv;
11465 +       struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11466         unsigned int md_limit = SHA512_DIGEST_SIZE;
11467 -       u32 cha_inst, cha_vid;
11468 -
11469 -       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11470 -       if (!dev_node) {
11471 -               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11472 -               if (!dev_node)
11473 -                       return -ENODEV;
11474 -       }
11475 -
11476 -       pdev = of_find_device_by_node(dev_node);
11477 -       if (!pdev) {
11478 -               of_node_put(dev_node);
11479 -               return -ENODEV;
11480 -       }
11481 -
11482 -       ctrldev = &pdev->dev;
11483 -       priv = dev_get_drvdata(ctrldev);
11484 -       of_node_put(dev_node);
11485 -
11486 -       /*
11487 -        * If priv is NULL, it's probably because the caam driver wasn't
11488 -        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11489 -        */
11490 -       if (!priv)
11491 -               return -ENODEV;
11492 +       u32 md_inst, md_vid;
11493  
11494         /*
11495          * Register crypto algorithms the device supports.  First, identify
11496          * presence and attributes of MD block.
11497          */
11498 -       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
11499 -       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11500 +       if (priv->era < 10) {
11501 +               md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
11502 +                         CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11503 +               md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11504 +                          CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11505 +       } else {
11506 +               u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
11507 +
11508 +               md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
11509 +               md_inst = mdha & CHA_VER_NUM_MASK;
11510 +       }
11511  
11512         /*
11513          * Skip registration of any hashing algorithms if MD block
11514          * is not present.
11515          */
11516 -       if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
11517 +       if (!md_inst)
11518                 return -ENODEV;
11519  
11520         /* Limit digest size based on LP256 */
11521 -       if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
11522 +       if (md_vid == CHA_VER_VID_MD_LP256)
11523                 md_limit = SHA256_DIGEST_SIZE;
11524  
11525         INIT_LIST_HEAD(&hash_list);
11526 @@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
11527  
11528         return err;
11529  }
11530 -
11531 -module_init(caam_algapi_hash_init);
11532 -module_exit(caam_algapi_hash_exit);
11533 -
11534 -MODULE_LICENSE("GPL");
11535 -MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
11536 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
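
From Era 10 onwards the per-CHA version registers replace the packed cha_id_ls/cha_num_ls words, which is why the hunks above read MDHA presence and version from two different places. A standalone sketch of the extraction follows; the mask and shift values here are illustrative stand-ins for the authoritative CHA_ID_LS_MD_* / CHA_VER_* definitions in regs.h, which this excerpt does not show.

#include <stdint.h>

/* Illustrative field layouts; the real values live in regs.h. */
#define LS_MD_SHIFT	12
#define LS_MD_MASK	(0xfu << LS_MD_SHIFT)
#define VER_VID_SHIFT	24
#define VER_VID_MASK	(0xffu << VER_VID_SHIFT)
#define VER_NUM_MASK	0xffu

struct md_info { uint32_t vid, inst; };

static struct md_info probe_md_sketch(int era, uint32_t cha_id_ls,
				      uint32_t cha_num_ls, uint32_t mdha_vreg)
{
	struct md_info info;

	if (era < 10) {
		/* Packed least-significant CHA ID/instantiation words */
		info.vid = (cha_id_ls & LS_MD_MASK) >> LS_MD_SHIFT;
		info.inst = (cha_num_ls & LS_MD_MASK) >> LS_MD_SHIFT;
	} else {
		/* Dedicated MDHA version register */
		info.vid = (mdha_vreg & VER_VID_MASK) >> VER_VID_SHIFT;
		info.inst = mdha_vreg & VER_NUM_MASK;
	}
	return info;
}
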
11537 --- /dev/null
11538 +++ b/drivers/crypto/caam/caamhash_desc.c
11539 @@ -0,0 +1,108 @@
11540 +/*
11541 + * Shared descriptors for ahash algorithms
11542 + *
11543 + * Copyright 2017 NXP
11544 + *
11545 + * Redistribution and use in source and binary forms, with or without
11546 + * modification, are permitted provided that the following conditions are met:
11547 + *     * Redistributions of source code must retain the above copyright
11548 + *      notice, this list of conditions and the following disclaimer.
11549 + *     * Redistributions in binary form must reproduce the above copyright
11550 + *      notice, this list of conditions and the following disclaimer in the
11551 + *      documentation and/or other materials provided with the distribution.
11552 + *     * Neither the names of the above-listed copyright holders nor the
11553 + *      names of any contributors may be used to endorse or promote products
11554 + *      derived from this software without specific prior written permission.
11555 + *
11556 + *
11557 + * ALTERNATIVELY, this software may be distributed under the terms of the
11558 + * GNU General Public License ("GPL") as published by the Free Software
11559 + * Foundation, either version 2 of that License or (at your option) any
11560 + * later version.
11561 + *
11562 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11563 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11564 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11565 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11566 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11567 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11568 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11569 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11570 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11571 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11572 + * POSSIBILITY OF SUCH DAMAGE.
11573 + */
11574 +
11575 +#include "compat.h"
11576 +#include "desc_constr.h"
11577 +#include "caamhash_desc.h"
11578 +
11579 +/**
11580 + * cnstr_shdsc_ahash - ahash shared descriptor
11581 + * @desc: pointer to buffer used for descriptor construction
11582 + * @adata: pointer to authentication transform definitions.
11583 + *         A split key is required for SEC Era < 6; the size of the split key
11584 + *         is specified in this case.
11585 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
11586 + *         SHA256, SHA384, SHA512}.
11587 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
11588 + * @digestsize: algorithm's digest size
11589 + * @ctx_len: size of Context Register
11590 + * @import_ctx: true if previous Context Register needs to be restored
11591 + *              must be true for ahash update and final
11592 + *              must be false for ahash first and digest
11593 + * @era: SEC Era
11594 + */
11595 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11596 +                      int digestsize, int ctx_len, bool import_ctx, int era)
11597 +{
11598 +       u32 op = adata->algtype;
11599 +
11600 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11601 +
11602 +       /* Append key if it has been set; ahash update excluded */
11603 +       if (state != OP_ALG_AS_UPDATE && adata->keylen) {
11604 +               u32 *skip_key_load;
11605 +
11606 +               /* Skip key loading if already shared */
11607 +               skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11608 +                                           JUMP_COND_SHRD);
11609 +
11610 +               if (era < 6)
11611 +                       append_key_as_imm(desc, adata->key_virt,
11612 +                                         adata->keylen_pad,
11613 +                                         adata->keylen, CLASS_2 |
11614 +                                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
11615 +               else
11616 +                       append_proto_dkp(desc, adata);
11617 +
11618 +               set_jump_tgt_here(desc, skip_key_load);
11619 +
11620 +               op |= OP_ALG_AAI_HMAC_PRECOMP;
11621 +       }
11622 +
11623 +       /* If needed, import context from software */
11624 +       if (import_ctx)
11625 +               append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
11626 +                               LDST_SRCDST_BYTE_CONTEXT);
11627 +
11628 +       /* Class 2 operation */
11629 +       append_operation(desc, op | state | OP_ALG_ENCRYPT);
11630 +
11631 +       /*
11632 +        * Load from buf and/or src and write to req->result or state->context
11633 +        * Calculate remaining bytes to read
11634 +        */
11635 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11636 +       /* Read remaining bytes */
11637 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11638 +                            FIFOLD_TYPE_MSG | KEY_VLF);
11639 +       /* Store class2 context bytes */
11640 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11641 +                        LDST_SRCDST_BYTE_CONTEXT);
11642 +}
11643 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
11644 +
11645 +MODULE_LICENSE("Dual BSD/GPL");
11646 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
11647 +MODULE_AUTHOR("NXP Semiconductors");
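
The four shared descriptors built by ahash_set_sh_desc() differ only in the (state, result size, import_ctx) triple passed to cnstr_shdsc_ahash(). A hedged sketch of those four calls, assuming the driver's desc.h, desc_constr.h and caamhash_desc.h are in scope and the descriptor buffers and adata are set up as in caamhash.c:

/* Sketch mirroring the calls made by ahash_set_sh_desc() above. */
static void build_ahash_shdescs_sketch(u32 *upd, u32 *first, u32 *fin,
				       u32 *dig, struct alginfo *adata,
				       int ctx_len, int digestsize, int era)
{
	cnstr_shdsc_ahash(upd, adata, OP_ALG_AS_UPDATE, ctx_len, ctx_len,
			  true, era);	/* update: import, re-export context */
	cnstr_shdsc_ahash(first, adata, OP_ALG_AS_INIT, ctx_len, ctx_len,
			  false, era);	/* update_first: fresh context */
	cnstr_shdsc_ahash(fin, adata, OP_ALG_AS_FINALIZE, digestsize, ctx_len,
			  true, era);	/* final: import context, emit digest */
	cnstr_shdsc_ahash(dig, adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx_len, false, era);	/* digest: one-shot */
}
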
11648 --- /dev/null
11649 +++ b/drivers/crypto/caam/caamhash_desc.h
11650 @@ -0,0 +1,49 @@
11651 +/*
11652 + * Shared descriptors for ahash algorithms
11653 + *
11654 + * Copyright 2017 NXP
11655 + *
11656 + * Redistribution and use in source and binary forms, with or without
11657 + * modification, are permitted provided that the following conditions are met:
11658 + *     * Redistributions of source code must retain the above copyright
11659 + *      notice, this list of conditions and the following disclaimer.
11660 + *     * Redistributions in binary form must reproduce the above copyright
11661 + *      notice, this list of conditions and the following disclaimer in the
11662 + *      documentation and/or other materials provided with the distribution.
11663 + *     * Neither the names of the above-listed copyright holders nor the
11664 + *      names of any contributors may be used to endorse or promote products
11665 + *      derived from this software without specific prior written permission.
11666 + *
11667 + *
11668 + * ALTERNATIVELY, this software may be distributed under the terms of the
11669 + * GNU General Public License ("GPL") as published by the Free Software
11670 + * Foundation, either version 2 of that License or (at your option) any
11671 + * later version.
11672 + *
11673 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11674 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11675 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11676 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11677 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11678 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11679 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11680 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11681 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11682 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11683 + * POSSIBILITY OF SUCH DAMAGE.
11684 + */
11685 +
11686 +#ifndef _CAAMHASH_DESC_H_
11687 +#define _CAAMHASH_DESC_H_
11688 +
11689 +/* length of descriptors text */
11690 +#define DESC_AHASH_BASE                        (3 * CAAM_CMD_SZ)
11691 +#define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
11692 +#define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11693 +#define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11694 +#define DESC_AHASH_DIGEST_LEN          (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11695 +
11696 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11697 +                      int digestsize, int ctx_len, bool import_ctx, int era);
11698 +
11699 +#endif /* _CAAMHASH_DESC_H_ */
11700 --- a/drivers/crypto/caam/caampkc.c
11701 +++ b/drivers/crypto/caam/caampkc.c
11702 @@ -2,6 +2,7 @@
11703   * caam - Freescale FSL CAAM support for Public Key Cryptography
11704   *
11705   * Copyright 2016 Freescale Semiconductor, Inc.
11706 + * Copyright 2018 NXP
11707   *
11708   * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
11709   * all the desired key parameters, input and output pointers.
11710 @@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
11711  };
11712  
11713  /* Public Key Cryptography module initialization handler */
11714 -static int __init caam_pkc_init(void)
11715 +int caam_pkc_init(struct device *ctrldev)
11716  {
11717 -       struct device_node *dev_node;
11718 -       struct platform_device *pdev;
11719 -       struct device *ctrldev;
11720 -       struct caam_drv_private *priv;
11721 -       u32 cha_inst, pk_inst;
11722 +       struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11723 +       u32 pk_inst;
11724         int err;
11725  
11726 -       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11727 -       if (!dev_node) {
11728 -               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11729 -               if (!dev_node)
11730 -                       return -ENODEV;
11731 -       }
11732 -
11733 -       pdev = of_find_device_by_node(dev_node);
11734 -       if (!pdev) {
11735 -               of_node_put(dev_node);
11736 -               return -ENODEV;
11737 -       }
11738 -
11739 -       ctrldev = &pdev->dev;
11740 -       priv = dev_get_drvdata(ctrldev);
11741 -       of_node_put(dev_node);
11742 -
11743 -       /*
11744 -        * If priv is NULL, it's probably because the caam driver wasn't
11745 -        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11746 -        */
11747 -       if (!priv)
11748 -               return -ENODEV;
11749 -
11750         /* Determine public key hardware accelerator presence. */
11751 -       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11752 -       pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11753 +       if (priv->era < 10)
11754 +               pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11755 +                          CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11756 +       else
11757 +               pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
11758  
11759         /* Do not register algorithms if PKHA is not present. */
11760         if (!pk_inst)
11761 -               return -ENODEV;
11762 +               return 0;
11763  
11764         err = crypto_register_akcipher(&caam_rsa);
11765         if (err)
11766 @@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
11767         return err;
11768  }
11769  
11770 -static void __exit caam_pkc_exit(void)
11771 +void caam_pkc_exit(void)
11772  {
11773         crypto_unregister_akcipher(&caam_rsa);
11774  }
11775 -
11776 -module_init(caam_pkc_init);
11777 -module_exit(caam_pkc_exit);
11778 -
11779 -MODULE_LICENSE("Dual BSD/GPL");
11780 -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
11781 -MODULE_AUTHOR("Freescale Semiconductor");
11782 --- a/drivers/crypto/caam/caamrng.c
11783 +++ b/drivers/crypto/caam/caamrng.c
11784 @@ -2,6 +2,7 @@
11785   * caam - Freescale FSL CAAM support for hw_random
11786   *
11787   * Copyright 2011 Freescale Semiconductor, Inc.
11788 + * Copyright 2018 NXP
11789   *
11790   * Based on caamalg.c crypto API driver.
11791   *
11792 @@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
11793         .read           = caam_read,
11794  };
11795  
11796 -static void __exit caam_rng_exit(void)
11797 +void caam_rng_exit(void)
11798  {
11799         caam_jr_free(rng_ctx->jrdev);
11800         hwrng_unregister(&caam_rng);
11801         kfree(rng_ctx);
11802  }
11803  
11804 -static int __init caam_rng_init(void)
11805 +int caam_rng_init(struct device *ctrldev)
11806  {
11807         struct device *dev;
11808 -       struct device_node *dev_node;
11809 -       struct platform_device *pdev;
11810 -       struct device *ctrldev;
11811 -       struct caam_drv_private *priv;
11812 +       u32 rng_inst;
11813 +       struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11814         int err;
11815  
11816 -       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11817 -       if (!dev_node) {
11818 -               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11819 -               if (!dev_node)
11820 -                       return -ENODEV;
11821 -       }
11822 -
11823 -       pdev = of_find_device_by_node(dev_node);
11824 -       if (!pdev) {
11825 -               of_node_put(dev_node);
11826 -               return -ENODEV;
11827 -       }
11828 -
11829 -       ctrldev = &pdev->dev;
11830 -       priv = dev_get_drvdata(ctrldev);
11831 -       of_node_put(dev_node);
11832 -
11833 -       /*
11834 -        * If priv is NULL, it's probably because the caam driver wasn't
11835 -        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11836 -        */
11837 -       if (!priv)
11838 -               return -ENODEV;
11839 -
11840         /* Check for an instantiated RNG before registration */
11841 -       if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
11842 -               return -ENODEV;
11843 +       if (priv->era < 10)
11844 +               rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11845 +                           CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
11846 +       else
11847 +               rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
11848 +
11849 +       if (!rng_inst)
11850 +               return 0;
11851  
11852         dev = caam_jr_alloc();
11853         if (IS_ERR(dev)) {
11854 @@ -361,10 +342,3 @@ free_caam_alloc:
11855         caam_jr_free(dev);
11856         return err;
11857  }
11858 -
11859 -module_init(caam_rng_init);
11860 -module_exit(caam_rng_exit);
11861 -
11862 -MODULE_LICENSE("GPL");
11863 -MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
11864 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
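
Across caamhash.c, caampkc.c and caamrng.c this patch strips the per-module module_init()/module_exit() boilerplate and the duplicated device-tree lookups; the exported caam_*_init(ctrldev)/caam_*_exit() entry points are instead meant to be called once the controller knows the hardware is up. A hedged sketch of what such a caller could look like (the actual call site lives in the job-ring/controller code, outside this excerpt, and its error handling is reduced here to "unwind what succeeded"):

#include <linux/device.h>

static int caam_register_algs_sketch(struct device *ctrldev)
{
	int err;

	err = caam_algapi_hash_init(ctrldev);
	if (err)
		return err;

	err = caam_pkc_init(ctrldev);	/* returns 0 if PKHA is absent */
	if (err)
		goto err_hash;

	err = caam_rng_init(ctrldev);	/* returns 0 if RNG is absent */
	if (err)
		goto err_pkc;

	return 0;

err_pkc:
	caam_pkc_exit();
err_hash:
	caam_algapi_hash_exit();
	return err;
}
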
11865 --- a/drivers/crypto/caam/compat.h
11866 +++ b/drivers/crypto/caam/compat.h
11867 @@ -17,6 +17,7 @@
11868  #include <linux/of_platform.h>
11869  #include <linux/dma-mapping.h>
11870  #include <linux/io.h>
11871 +#include <linux/iommu.h>
11872  #include <linux/spinlock.h>
11873  #include <linux/rtnetlink.h>
11874  #include <linux/in.h>
11875 @@ -34,10 +35,13 @@
11876  #include <crypto/des.h>
11877  #include <crypto/sha.h>
11878  #include <crypto/md5.h>
11879 +#include <crypto/chacha20.h>
11880 +#include <crypto/poly1305.h>
11881  #include <crypto/internal/aead.h>
11882  #include <crypto/authenc.h>
11883  #include <crypto/akcipher.h>
11884  #include <crypto/scatterwalk.h>
11885 +#include <crypto/skcipher.h>
11886  #include <crypto/internal/skcipher.h>
11887  #include <crypto/internal/hash.h>
11888  #include <crypto/internal/rsa.h>
11889 --- a/drivers/crypto/caam/ctrl.c
11890 +++ b/drivers/crypto/caam/ctrl.c
11891 @@ -2,6 +2,7 @@
11892   * Controller-level driver, kernel property detection, initialization
11893   *
11894   * Copyright 2008-2012 Freescale Semiconductor, Inc.
11895 + * Copyright 2018 NXP
11896   */
11897  
11898  #include <linux/device.h>
11899 @@ -16,17 +17,15 @@
11900  #include "desc_constr.h"
11901  #include "ctrl.h"
11902  
11903 -bool caam_little_end;
11904 -EXPORT_SYMBOL(caam_little_end);
11905  bool caam_dpaa2;
11906  EXPORT_SYMBOL(caam_dpaa2);
11907 -bool caam_imx;
11908 -EXPORT_SYMBOL(caam_imx);
11909  
11910  #ifdef CONFIG_CAAM_QI
11911  #include "qi.h"
11912  #endif
11913  
11914 +static struct platform_device *caam_dma_dev;
11915 +
11916  /*
11917   * i.MX targets tend to have clock control subsystems that can
11918   * enable/disable clocking to our device.
11919 @@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
11920         struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
11921         struct caam_deco __iomem *deco = ctrlpriv->deco;
11922         unsigned int timeout = 100000;
11923 -       u32 deco_dbg_reg, flags;
11924 +       u32 deco_dbg_reg, deco_state, flags;
11925         int i;
11926  
11927  
11928 @@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
11929         timeout = 10000000;
11930         do {
11931                 deco_dbg_reg = rd_reg32(&deco->desc_dbg);
11932 +
11933 +               if (ctrlpriv->era < 10)
11934 +                       deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
11935 +                                    DESC_DBG_DECO_STAT_SHIFT;
11936 +               else
11937 +                       deco_state = (rd_reg32(&deco->dbg_exec) &
11938 +                                     DESC_DER_DECO_STAT_MASK) >>
11939 +                                    DESC_DER_DECO_STAT_SHIFT;
11940 +
11941                 /*
11942                  * If an error occurred in the descriptor, then
11943                  * the DECO status field will be set to 0x0D
11944                  */
11945 -               if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
11946 -                   DESC_DBG_DECO_STAT_HOST_ERR)
11947 +               if (deco_state == DECO_STAT_HOST_ERR)
11948                         break;
11949 +
11950                 cpu_relax();
11951         } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
11952  
11953 @@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
11954         of_platform_depopulate(ctrldev);
11955  
11956  #ifdef CONFIG_CAAM_QI
11957 -       if (ctrlpriv->qidev)
11958 -               caam_qi_shutdown(ctrlpriv->qidev);
11959 +       if (ctrlpriv->qi_init)
11960 +               caam_qi_shutdown(ctrldev);
11961  #endif
11962  
11963         /*
11964          * De-initialize RNG state handles initialized by this driver.
11965 -        * In case of DPAA 2.x, RNG is managed by MC firmware.
11966 +        * In case of SoCs with Management Complex, RNG is managed by MC f/w.
11967          */
11968 -       if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
11969 +       if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
11970                 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
11971  
11972         /* Shut down debug views */
11973 @@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
11974         debugfs_remove_recursive(ctrlpriv->dfs_root);
11975  #endif
11976  
11977 +       if (caam_dma_dev)
11978 +               platform_device_unregister(caam_dma_dev);
11979 +
11980         /* Unmap controller region */
11981         iounmap(ctrl);
11982  
11983 @@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
11984                 {.family = "Freescale i.MX"},
11985                 {},
11986         };
11987 +       static struct platform_device_info caam_dma_pdev_info = {
11988 +               .name = "caam-dma",
11989 +               .id = PLATFORM_DEVID_NONE
11990 +       };
11991         struct device *dev;
11992         struct device_node *nprop, *np;
11993         struct caam_ctrl __iomem *ctrl;
11994 @@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
11995         struct caam_perfmon *perfmon;
11996  #endif
11997         u32 scfgr, comp_params;
11998 -       u32 cha_vid_ls;
11999 +       u8 rng_vid;
12000         int pg_size;
12001         int BLOCK_OFFSET = 0;
12002  
12003 @@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
12004         dev_set_drvdata(dev, ctrlpriv);
12005         nprop = pdev->dev.of_node;
12006  
12007 +       /* Get configuration properties from device tree */
12008 +       /* First, get register page */
12009 +       ctrl = of_iomap(nprop, 0);
12010 +       if (!ctrl) {
12011 +               dev_err(dev, "caam: of_iomap() failed\n");
12012 +               return -ENOMEM;
12013 +       }
12014 +
12015 +       caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12016 +                                 (CSTA_PLEND | CSTA_ALT_PLEND));
12017         caam_imx = (bool)soc_device_match(imx_soc);
12018  
12019 +       comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12020 +       caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12021 +       ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12022 +
12023 +#ifdef CONFIG_CAAM_QI
12024 +       /* If (DPAA 1.x) QI present, check whether dependencies are available */
12025 +       if (ctrlpriv->qi_present && !caam_dpaa2) {
12026 +               ret = qman_is_probed();
12027 +               if (!ret) {
12028 +                       ret = -EPROBE_DEFER;
12029 +                       goto iounmap_ctrl;
12030 +               } else if (ret < 0) {
12031 +                       dev_err(dev, "failing probe due to qman probe error\n");
12032 +                       ret = -ENODEV;
12033 +                       goto iounmap_ctrl;
12034 +               }
12035 +
12036 +               ret = qman_portals_probed();
12037 +               if (!ret) {
12038 +                       ret = -EPROBE_DEFER;
12039 +                       goto iounmap_ctrl;
12040 +               } else if (ret < 0) {
12041 +                       dev_err(dev, "failing probe due to qman portals probe error\n");
12042 +                       ret = -ENODEV;
12043 +                       goto iounmap_ctrl;
12044 +               }
12045 +       }
12046 +#endif
12047 +
12048         /* Enable clocking */
12049         clk = caam_drv_identify_clk(&pdev->dev, "ipg");
12050         if (IS_ERR(clk)) {
12051                 ret = PTR_ERR(clk);
12052                 dev_err(&pdev->dev,
12053                         "can't identify CAAM ipg clk: %d\n", ret);
12054 -               return ret;
12055 +               goto iounmap_ctrl;
12056         }
12057         ctrlpriv->caam_ipg = clk;
12058  
12059 @@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
12060                 ret = PTR_ERR(clk);
12061                 dev_err(&pdev->dev,
12062                         "can't identify CAAM mem clk: %d\n", ret);
12063 -               return ret;
12064 +               goto iounmap_ctrl;
12065         }
12066         ctrlpriv->caam_mem = clk;
12067  
12068 @@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
12069                 ret = PTR_ERR(clk);
12070                 dev_err(&pdev->dev,
12071                         "can't identify CAAM aclk clk: %d\n", ret);
12072 -               return ret;
12073 +               goto iounmap_ctrl;
12074         }
12075         ctrlpriv->caam_aclk = clk;
12076  
12077 @@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
12078                         ret = PTR_ERR(clk);
12079                         dev_err(&pdev->dev,
12080                                 "can't identify CAAM emi_slow clk: %d\n", ret);
12081 -                       return ret;
12082 +                       goto iounmap_ctrl;
12083                 }
12084                 ctrlpriv->caam_emi_slow = clk;
12085         }
12086 @@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
12087         ret = clk_prepare_enable(ctrlpriv->caam_ipg);
12088         if (ret < 0) {
12089                 dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
12090 -               return ret;
12091 +               goto iounmap_ctrl;
12092         }
12093  
12094         ret = clk_prepare_enable(ctrlpriv->caam_mem);
12095 @@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
12096                 }
12097         }
12098  
12099 -       /* Get configuration properties from device tree */
12100 -       /* First, get register page */
12101 -       ctrl = of_iomap(nprop, 0);
12102 -       if (ctrl == NULL) {
12103 -               dev_err(dev, "caam: of_iomap() failed\n");
12104 -               ret = -ENOMEM;
12105 -               goto disable_caam_emi_slow;
12106 -       }
12107 -
12108 -       caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12109 -                                 (CSTA_PLEND | CSTA_ALT_PLEND));
12110 -
12111 -       /* Finding the page size for using the CTPR_MS register */
12112 -       comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12113 -       pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12114 -
12115         /* Allocating the BLOCK_OFFSET based on the supported page size on
12116          * the platform
12117          */
12118 +       pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12119         if (pg_size == 0)
12120                 BLOCK_OFFSET = PG_SIZE_4K;
12121         else
12122 @@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
12123         /*
12124          * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
12125          * long pointers in master configuration register.
12126 -        * In case of DPAA 2.x, Management Complex firmware performs
12127 +        * In case of SoCs with Management Complex, MC f/w performs
12128          * the configuration.
12129          */
12130 -       caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12131 -       if (!caam_dpaa2)
12132 +       np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
12133 +       ctrlpriv->mc_en = !!np;
12134 +       of_node_put(np);
12135 +
12136 +       if (!ctrlpriv->mc_en)
12137                 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
12138                               MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
12139                               MCFGR_WDENABLE | MCFGR_LARGE_BURST |
12140 @@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
12141         }
12142         if (ret) {
12143                 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
12144 -               goto iounmap_ctrl;
12145 +               goto disable_caam_emi_slow;
12146         }
12147  
12148 -       ret = of_platform_populate(nprop, caam_match, NULL, dev);
12149 -       if (ret) {
12150 -               dev_err(dev, "JR platform devices creation error\n");
12151 -               goto iounmap_ctrl;
12152 -       }
12153 +       ctrlpriv->era = caam_get_era();
12154 +       ctrlpriv->domain = iommu_get_domain_for_dev(dev);
12155  
12156  #ifdef CONFIG_DEBUG_FS
12157         /*
12158 @@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
12159         ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
12160  #endif
12161  
12162 -       ring = 0;
12163 -       for_each_available_child_of_node(nprop, np)
12164 -               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12165 -                   of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12166 -                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12167 -                                            ((__force uint8_t *)ctrl +
12168 -                                            (ring + JR_BLOCK_NUMBER) *
12169 -                                             BLOCK_OFFSET
12170 -                                            );
12171 -                       ctrlpriv->total_jobrs++;
12172 -                       ring++;
12173 -               }
12174 -
12175         /* Check to see if (DPAA 1.x) QI present. If so, enable */
12176 -       ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12177         if (ctrlpriv->qi_present && !caam_dpaa2) {
12178                 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
12179                                ((__force uint8_t *)ctrl +
12180 @@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
12181  #endif
12182         }
12183  
12184 +       ret = of_platform_populate(nprop, caam_match, NULL, dev);
12185 +       if (ret) {
12186 +               dev_err(dev, "JR platform devices creation error\n");
12187 +               goto shutdown_qi;
12188 +       }
12189 +
12190 +       ring = 0;
12191 +       for_each_available_child_of_node(nprop, np)
12192 +               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12193 +                   of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12194 +                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12195 +                                            ((__force uint8_t *)ctrl +
12196 +                                            (ring + JR_BLOCK_NUMBER) *
12197 +                                             BLOCK_OFFSET
12198 +                                            );
12199 +                       ctrlpriv->total_jobrs++;
12200 +                       ring++;
12201 +               }
12202 +
12203         /* If no QI and no rings specified, quit and go home */
12204         if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
12205                 dev_err(dev, "no queues configured, terminating\n");
12206 @@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
12207                 goto caam_remove;
12208         }
12209  
12210 -       cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
12211 +       caam_dma_pdev_info.parent = dev;
12212 +       caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
12213 +       caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
12214 +       if (IS_ERR(caam_dma_dev)) {
12215 +               dev_err(dev, "Unable to create and register caam-dma dev\n");
12216 +               caam_dma_dev = NULL;
12217 +       } else {
12218 +               set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
12219 +       }
12220 +
12221 +       if (ctrlpriv->era < 10)
12222 +               rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
12223 +                          CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
12224 +       else
12225 +               rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
12226 +                          CHA_VER_VID_SHIFT;
12227  
12228         /*
12229          * If SEC has RNG version >= 4 and RNG state handle has not been
12230          * already instantiated, do RNG instantiation
12231 -        * In case of DPAA 2.x, RNG is managed by MC firmware.
12232 +        * In case of SoCs with Management Complex, RNG is managed by MC f/w.
12233          */
12234 -       if (!caam_dpaa2 &&
12235 -           (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
12236 +       if (!ctrlpriv->mc_en && rng_vid >= 4) {
12237                 ctrlpriv->rng4_sh_init =
12238                         rd_reg32(&ctrl->r4tst[0].rdsta);
12239                 /*
12240 @@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
12241  
12242         /* Report "alive" for developer to see */
12243         dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
12244 -                caam_get_era());
12245 -       dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
12246 -                ctrlpriv->total_jobrs, ctrlpriv->qi_present,
12247 -                caam_dpaa2 ? "yes" : "no");
12248 +                ctrlpriv->era);
12249 +       dev_info(dev, "job rings = %d, qi = %d\n",
12250 +                ctrlpriv->total_jobrs, ctrlpriv->qi_present);
12251  
12252  #ifdef CONFIG_DEBUG_FS
12253         debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
12254 @@ -816,8 +873,11 @@ caam_remove:
12255         caam_remove(pdev);
12256         return ret;
12257  
12258 -iounmap_ctrl:
12259 -       iounmap(ctrl);
12260 +shutdown_qi:
12261 +#ifdef CONFIG_CAAM_QI
12262 +       if (ctrlpriv->qi_init)
12263 +               caam_qi_shutdown(dev);
12264 +#endif
12265  disable_caam_emi_slow:
12266         if (ctrlpriv->caam_emi_slow)
12267                 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
12268 @@ -827,6 +887,8 @@ disable_caam_mem:
12269         clk_disable_unprepare(ctrlpriv->caam_mem);
12270  disable_caam_ipg:
12271         clk_disable_unprepare(ctrlpriv->caam_ipg);
12272 +iounmap_ctrl:
12273 +       iounmap(ctrl);
12274         return ret;
12275  }
12276  
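
The new early check in caam_probe() encodes a common dependency pattern: a tri-state probed() helper where 0 means "not probed yet" (defer) and a negative value means "failed" (give up). A kernel-context sketch of the pattern, with qman_is_probed()/qman_portals_probed() standing in for any such dependency:

#include <linux/device.h>
#include <linux/errno.h>

/* 0 = dependency not probed yet, <0 = dependency failed, >0 = ready. */
static int check_dependency_sketch(struct device *dev, int (*probed)(void),
				   const char *what)
{
	int ret = probed();

	if (!ret)
		return -EPROBE_DEFER;	/* retry after the dependency probes */
	if (ret < 0) {
		dev_err(dev, "failing probe due to %s probe error\n", what);
		return -ENODEV;
	}
	return 0;
}
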
12277 --- a/drivers/crypto/caam/desc.h
12278 +++ b/drivers/crypto/caam/desc.h
12279 @@ -4,6 +4,7 @@
12280   * Definitions to support CAAM descriptor instruction generation
12281   *
12282   * Copyright 2008-2011 Freescale Semiconductor, Inc.
12283 + * Copyright 2018 NXP
12284   */
12285  
12286  #ifndef DESC_H
12287 @@ -42,6 +43,7 @@
12288  #define CMD_SEQ_LOAD           (0x03 << CMD_SHIFT)
12289  #define CMD_FIFO_LOAD          (0x04 << CMD_SHIFT)
12290  #define CMD_SEQ_FIFO_LOAD      (0x05 << CMD_SHIFT)
12291 +#define CMD_MOVEB              (0x07 << CMD_SHIFT)
12292  #define CMD_STORE              (0x0a << CMD_SHIFT)
12293  #define CMD_SEQ_STORE          (0x0b << CMD_SHIFT)
12294  #define CMD_FIFO_STORE         (0x0c << CMD_SHIFT)
12295 @@ -242,6 +244,7 @@
12296  #define LDST_SRCDST_WORD_DESCBUF_SHARED        (0x42 << LDST_SRCDST_SHIFT)
12297  #define LDST_SRCDST_WORD_DESCBUF_JOB_WE        (0x45 << LDST_SRCDST_SHIFT)
12298  #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
12299 +#define LDST_SRCDST_WORD_INFO_FIFO_SM  (0x71 << LDST_SRCDST_SHIFT)
12300  #define LDST_SRCDST_WORD_INFO_FIFO     (0x7a << LDST_SRCDST_SHIFT)
12301  
12302  /* Offset in source/destination */
12303 @@ -284,6 +287,12 @@
12304  #define LDLEN_SET_OFIFO_OFFSET_SHIFT   0
12305  #define LDLEN_SET_OFIFO_OFFSET_MASK    (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
12306  
12307 +/* Special Length definitions when dst=sm, nfifo-{sm,m} */
12308 +#define LDLEN_MATH0                    0
12309 +#define LDLEN_MATH1                    1
12310 +#define LDLEN_MATH2                    2
12311 +#define LDLEN_MATH3                    3
12312 +
12313  /*
12314   * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
12315   * Command Constructs
12316 @@ -355,6 +364,7 @@
12317  #define FIFOLD_TYPE_PK_N       (0x08 << FIFOLD_TYPE_SHIFT)
12318  #define FIFOLD_TYPE_PK_A       (0x0c << FIFOLD_TYPE_SHIFT)
12319  #define FIFOLD_TYPE_PK_B       (0x0d << FIFOLD_TYPE_SHIFT)
12320 +#define FIFOLD_TYPE_IFIFO      (0x0f << FIFOLD_TYPE_SHIFT)
12321  
12322  /* Other types. Need to OR in last/flush bits as desired */
12323  #define FIFOLD_TYPE_MSG_MASK   (0x38 << FIFOLD_TYPE_SHIFT)
12324 @@ -408,6 +418,7 @@
12325  #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
12326  #define FIFOST_TYPE_RNGSTORE    (0x34 << FIFOST_TYPE_SHIFT)
12327  #define FIFOST_TYPE_RNGFIFO     (0x35 << FIFOST_TYPE_SHIFT)
12328 +#define FIFOST_TYPE_METADATA    (0x3e << FIFOST_TYPE_SHIFT)
12329  #define FIFOST_TYPE_SKIP        (0x3f << FIFOST_TYPE_SHIFT)
12330  
12331  /*
12332 @@ -444,6 +455,18 @@
12333  #define OP_PCLID_DSAVERIFY     (0x16 << OP_PCLID_SHIFT)
12334  #define OP_PCLID_RSAENC_PUBKEY  (0x18 << OP_PCLID_SHIFT)
12335  #define OP_PCLID_RSADEC_PRVKEY  (0x19 << OP_PCLID_SHIFT)
12336 +#define OP_PCLID_DKP_MD5       (0x20 << OP_PCLID_SHIFT)
12337 +#define OP_PCLID_DKP_SHA1      (0x21 << OP_PCLID_SHIFT)
12338 +#define OP_PCLID_DKP_SHA224    (0x22 << OP_PCLID_SHIFT)
12339 +#define OP_PCLID_DKP_SHA256    (0x23 << OP_PCLID_SHIFT)
12340 +#define OP_PCLID_DKP_SHA384    (0x24 << OP_PCLID_SHIFT)
12341 +#define OP_PCLID_DKP_SHA512    (0x25 << OP_PCLID_SHIFT)
12342 +#define OP_PCLID_DKP_RIF_MD5   (0x60 << OP_PCLID_SHIFT)
12343 +#define OP_PCLID_DKP_RIF_SHA1  (0x61 << OP_PCLID_SHIFT)
12344 +#define OP_PCLID_DKP_RIF_SHA224        (0x62 << OP_PCLID_SHIFT)
12345 +#define OP_PCLID_DKP_RIF_SHA256        (0x63 << OP_PCLID_SHIFT)
12346 +#define OP_PCLID_DKP_RIF_SHA384        (0x64 << OP_PCLID_SHIFT)
12347 +#define OP_PCLID_DKP_RIF_SHA512        (0x65 << OP_PCLID_SHIFT)
12348  
12349  /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
12350  #define OP_PCLID_IPSEC         (0x01 << OP_PCLID_SHIFT)
12351 @@ -1093,6 +1116,22 @@
12352  /* MacSec protinfos */
12353  #define OP_PCL_MACSEC                           0x0001
12354  
12355 +/* Derived Key Protocol (DKP) Protinfo */
12356 +#define OP_PCL_DKP_SRC_SHIFT   14
12357 +#define OP_PCL_DKP_SRC_MASK    (3 << OP_PCL_DKP_SRC_SHIFT)
12358 +#define OP_PCL_DKP_SRC_IMM     (0 << OP_PCL_DKP_SRC_SHIFT)
12359 +#define OP_PCL_DKP_SRC_SEQ     (1 << OP_PCL_DKP_SRC_SHIFT)
12360 +#define OP_PCL_DKP_SRC_PTR     (2 << OP_PCL_DKP_SRC_SHIFT)
12361 +#define OP_PCL_DKP_SRC_SGF     (3 << OP_PCL_DKP_SRC_SHIFT)
12362 +#define OP_PCL_DKP_DST_SHIFT   12
12363 +#define OP_PCL_DKP_DST_MASK    (3 << OP_PCL_DKP_DST_SHIFT)
12364 +#define OP_PCL_DKP_DST_IMM     (0 << OP_PCL_DKP_DST_SHIFT)
12365 +#define OP_PCL_DKP_DST_SEQ     (1 << OP_PCL_DKP_DST_SHIFT)
12366 +#define OP_PCL_DKP_DST_PTR     (2 << OP_PCL_DKP_DST_SHIFT)
12367 +#define OP_PCL_DKP_DST_SGF     (3 << OP_PCL_DKP_DST_SHIFT)
12368 +#define OP_PCL_DKP_KEY_SHIFT   0
12369 +#define OP_PCL_DKP_KEY_MASK    (0xfff << OP_PCL_DKP_KEY_SHIFT)
12370 +
12371  /* PKI unidirectional protocol protinfo bits */
12372  #define OP_PCL_PKPROT_TEST                      0x0008
12373  #define OP_PCL_PKPROT_DECRYPT                   0x0004
12374 @@ -1105,6 +1144,12 @@
12375  #define OP_ALG_TYPE_CLASS1     (2 << OP_ALG_TYPE_SHIFT)
12376  #define OP_ALG_TYPE_CLASS2     (4 << OP_ALG_TYPE_SHIFT)
12377  
12378 +/* version register fields */
12379 +#define OP_VER_CCHA_NUM  0x000000ff /* Number CCHAs instantiated */
12380 +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
12381 +#define OP_VER_CCHA_REV  0x00ff0000 /* CCHA Revision Number */
12382 +#define OP_VER_CCHA_VID  0xff000000 /* CCHA Version ID */
12383 +
12384  #define OP_ALG_ALGSEL_SHIFT    16
12385  #define OP_ALG_ALGSEL_MASK     (0xff << OP_ALG_ALGSEL_SHIFT)
12386  #define OP_ALG_ALGSEL_SUBMASK  (0x0f << OP_ALG_ALGSEL_SHIFT)
12387 @@ -1124,6 +1169,8 @@
12388  #define OP_ALG_ALGSEL_KASUMI   (0x70 << OP_ALG_ALGSEL_SHIFT)
12389  #define OP_ALG_ALGSEL_CRC      (0x90 << OP_ALG_ALGSEL_SHIFT)
12390  #define OP_ALG_ALGSEL_SNOW_F9  (0xA0 << OP_ALG_ALGSEL_SHIFT)
12391 +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
12392 +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
12393  
12394  #define OP_ALG_AAI_SHIFT       4
12395  #define OP_ALG_AAI_MASK                (0x1ff << OP_ALG_AAI_SHIFT)
12396 @@ -1171,6 +1218,11 @@
12397  #define OP_ALG_AAI_RNG4_AI     (0x80 << OP_ALG_AAI_SHIFT)
12398  #define OP_ALG_AAI_RNG4_SK     (0x100 << OP_ALG_AAI_SHIFT)
12399  
12400 +/* Chacha20 AAI set */
12401 +#define OP_ALG_AAI_AEAD        (0x002 << OP_ALG_AAI_SHIFT)
12402 +#define OP_ALG_AAI_KEYSTREAM   (0x001 << OP_ALG_AAI_SHIFT)
12403 +#define OP_ALG_AAI_BC8         (0x008 << OP_ALG_AAI_SHIFT)
12404 +
12405  /* hmac/smac AAI set */
12406  #define OP_ALG_AAI_HASH                (0x00 << OP_ALG_AAI_SHIFT)
12407  #define OP_ALG_AAI_HMAC                (0x01 << OP_ALG_AAI_SHIFT)
12408 @@ -1359,6 +1411,7 @@
12409  #define MOVE_SRC_MATH3         (0x07 << MOVE_SRC_SHIFT)
12410  #define MOVE_SRC_INFIFO                (0x08 << MOVE_SRC_SHIFT)
12411  #define MOVE_SRC_INFIFO_CL     (0x09 << MOVE_SRC_SHIFT)
12412 +#define MOVE_SRC_AUX_ABLK      (0x0a << MOVE_SRC_SHIFT)
12413  
12414  #define MOVE_DEST_SHIFT                16
12415  #define MOVE_DEST_MASK         (0x0f << MOVE_DEST_SHIFT)
12416 @@ -1385,6 +1438,10 @@
12417  
12418  #define MOVELEN_MRSEL_SHIFT    0
12419  #define MOVELEN_MRSEL_MASK     (0x3 << MOVE_LEN_SHIFT)
12420 +#define MOVELEN_MRSEL_MATH0    (0 << MOVELEN_MRSEL_SHIFT)
12421 +#define MOVELEN_MRSEL_MATH1    (1 << MOVELEN_MRSEL_SHIFT)
12422 +#define MOVELEN_MRSEL_MATH2    (2 << MOVELEN_MRSEL_SHIFT)
12423 +#define MOVELEN_MRSEL_MATH3    (3 << MOVELEN_MRSEL_SHIFT)
12424  
12425  /*
12426   * MATH Command Constructs
12427 @@ -1440,10 +1497,11 @@
12428  #define MATH_SRC1_REG2         (0x02 << MATH_SRC1_SHIFT)
12429  #define MATH_SRC1_REG3         (0x03 << MATH_SRC1_SHIFT)
12430  #define MATH_SRC1_IMM          (0x04 << MATH_SRC1_SHIFT)
12431 -#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC0_SHIFT)
12432 +#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC1_SHIFT)
12433  #define MATH_SRC1_INFIFO       (0x0a << MATH_SRC1_SHIFT)
12434  #define MATH_SRC1_OUTFIFO      (0x0b << MATH_SRC1_SHIFT)
12435  #define MATH_SRC1_ONE          (0x0c << MATH_SRC1_SHIFT)
12436 +#define MATH_SRC1_ZERO         (0x0f << MATH_SRC1_SHIFT)
12437  
12438  /* Destination selectors */
12439  #define MATH_DEST_SHIFT                8
12440 @@ -1452,6 +1510,7 @@
12441  #define MATH_DEST_REG1         (0x01 << MATH_DEST_SHIFT)
12442  #define MATH_DEST_REG2         (0x02 << MATH_DEST_SHIFT)
12443  #define MATH_DEST_REG3         (0x03 << MATH_DEST_SHIFT)
12444 +#define MATH_DEST_DPOVRD       (0x07 << MATH_DEST_SHIFT)
12445  #define MATH_DEST_SEQINLEN     (0x08 << MATH_DEST_SHIFT)
12446  #define MATH_DEST_SEQOUTLEN    (0x09 << MATH_DEST_SHIFT)
12447  #define MATH_DEST_VARSEQINLEN  (0x0a << MATH_DEST_SHIFT)
12448 @@ -1560,6 +1619,7 @@
12449  #define NFIFOENTRY_DTYPE_IV    (0x2 << NFIFOENTRY_DTYPE_SHIFT)
12450  #define NFIFOENTRY_DTYPE_SAD   (0x3 << NFIFOENTRY_DTYPE_SHIFT)
12451  #define NFIFOENTRY_DTYPE_ICV   (0xA << NFIFOENTRY_DTYPE_SHIFT)
12452 +#define NFIFOENTRY_DTYPE_POLY  (0xB << NFIFOENTRY_DTYPE_SHIFT)
12453  #define NFIFOENTRY_DTYPE_SKIP  (0xE << NFIFOENTRY_DTYPE_SHIFT)
12454  #define NFIFOENTRY_DTYPE_MSG   (0xF << NFIFOENTRY_DTYPE_SHIFT)
12455  
12456 @@ -1624,4 +1684,31 @@
12457  /* Frame Descriptor Command for Replacement Job Descriptor */
12458  #define FD_CMD_REPLACE_JOB_DESC                                0x20000000
12459  
12460 +/* CHA Control Register bits */
12461 +#define CCTRL_RESET_CHA_ALL          0x1
12462 +#define CCTRL_RESET_CHA_AESA         0x2
12463 +#define CCTRL_RESET_CHA_DESA         0x4
12464 +#define CCTRL_RESET_CHA_AFHA         0x8
12465 +#define CCTRL_RESET_CHA_KFHA         0x10
12466 +#define CCTRL_RESET_CHA_SF8A         0x20
12467 +#define CCTRL_RESET_CHA_PKHA         0x40
12468 +#define CCTRL_RESET_CHA_MDHA         0x80
12469 +#define CCTRL_RESET_CHA_CRCA         0x100
12470 +#define CCTRL_RESET_CHA_RNG          0x200
12471 +#define CCTRL_RESET_CHA_SF9A         0x400
12472 +#define CCTRL_RESET_CHA_ZUCE         0x800
12473 +#define CCTRL_RESET_CHA_ZUCA         0x1000
12474 +#define CCTRL_UNLOAD_PK_A0           0x10000
12475 +#define CCTRL_UNLOAD_PK_A1           0x20000
12476 +#define CCTRL_UNLOAD_PK_A2           0x40000
12477 +#define CCTRL_UNLOAD_PK_A3           0x80000
12478 +#define CCTRL_UNLOAD_PK_B0           0x100000
12479 +#define CCTRL_UNLOAD_PK_B1           0x200000
12480 +#define CCTRL_UNLOAD_PK_B2           0x400000
12481 +#define CCTRL_UNLOAD_PK_B3           0x800000
12482 +#define CCTRL_UNLOAD_PK_N            0x1000000
12483 +#define CCTRL_UNLOAD_PK_A            0x4000000
12484 +#define CCTRL_UNLOAD_PK_B            0x8000000
12485 +#define CCTRL_UNLOAD_SBOX            0x10000000
12486 +
12487  #endif /* DESC_H */
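
The OP_PCL_DKP_* fields added above occupy the protinfo half-word of a DKP OPERATION command: two 2-bit source/destination selectors plus a 12-bit key length. A small standalone sketch packing one such half-word, using the shifts and mask from the definitions above (the OP_PCLID_DKP_* class ID and OP_TYPE bits would be OR'd in separately):

#include <stdint.h>

#define DKP_SRC_SHIFT	14
#define DKP_DST_SHIFT	12
#define DKP_KEY_MASK	0xfffu

/* Pack src/dst selectors (0..3) and the key length into DKP protinfo. */
static uint32_t dkp_protinfo_sketch(unsigned int src, unsigned int dst,
				    unsigned int keylen)
{
	return ((src & 3u) << DKP_SRC_SHIFT) |
	       ((dst & 3u) << DKP_DST_SHIFT) |
	       (keylen & DKP_KEY_MASK);
}
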
12488 --- a/drivers/crypto/caam/desc_constr.h
12489 +++ b/drivers/crypto/caam/desc_constr.h
12490 @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
12491         append_ptr(desc, ptr);
12492  }
12493  
12494 -static inline void append_data(u32 * const desc, void *data, int len)
12495 +static inline void append_data(u32 * const desc, const void *data, int len)
12496  {
12497         u32 *offset = desc_end(desc);
12498  
12499 @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
12500         append_cmd(desc, len);
12501  }
12502  
12503 -static inline void append_cmd_data(u32 * const desc, void *data, int len,
12504 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
12505                                    u32 command)
12506  {
12507         append_cmd(desc, command | IMMEDIATE | len);
12508 @@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
12509  }
12510  APPEND_CMD_RET(jump, JUMP)
12511  APPEND_CMD_RET(move, MOVE)
12512 +APPEND_CMD_RET(moveb, MOVEB)
12513 +APPEND_CMD_RET(move_len, MOVE_LEN)
12514  
12515  static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
12516  {
12517 @@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
12518  APPEND_SEQ_PTR_INTLEN(out, OUT)
12519  
12520  #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
12521 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12522 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12523                                          unsigned int len, u32 options) \
12524  { \
12525         PRINT_POS; \
12526 @@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
12527   * from length of immediate data provided, e.g., split keys
12528   */
12529  #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
12530 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12531 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12532                                          unsigned int data_len, \
12533                                          unsigned int len, u32 options) \
12534  { \
12535 @@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
12536                                              u32 options) \
12537  { \
12538         PRINT_POS; \
12539 -       append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
12540 +       if (options & LDST_LEN_MASK) \
12541 +               append_cmd(desc, CMD_##op | IMMEDIATE | options); \
12542 +       else \
12543 +               append_cmd(desc, CMD_##op | IMMEDIATE | options | \
12544 +                          sizeof(type)); \
12545         append_cmd(desc, immediate); \
12546  }
12547  APPEND_CMD_RAW_IMM(load, LOAD, u32);
12548 @@ -452,7 +458,7 @@ struct alginfo {
12549         unsigned int keylen_pad;
12550         union {
12551                 dma_addr_t key_dma;
12552 -               void *key_virt;
12553 +               const void *key_virt;
12554         };
12555         bool key_inline;
12556  };
12557 @@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
12558         return (rem_bytes >= 0) ? 0 : -1;
12559  }
12560  
12561 +/**
12562 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
12563 + * @desc: pointer to buffer used for descriptor construction
12564 + * @adata: pointer to authentication transform definitions.
12565 + *         keylen should be the length of the initial key, while keylen_pad
12566 + *         is the length of the derived (split) key.
12567 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
12568 + *         SHA256, SHA384, SHA512}.
12569 + */
12570 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
12571 +{
12572 +       u32 protid;
12573 +
12574 +       /*
12575 +        * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
12576 +        * to OP_PCLID_DKP_{MD5, SHA*}
12577 +        */
12578 +       protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
12579 +                (0x20 << OP_ALG_ALGSEL_SHIFT);
12580 +
12581 +       if (adata->key_inline) {
12582 +               int words;
12583 +
12584 +               append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12585 +                                OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
12586 +                                adata->keylen);
12587 +               append_data(desc, adata->key_virt, adata->keylen);
12588 +
12589 +               /* Reserve space in descriptor buffer for the derived key */
12590 +               words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
12591 +                        ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
12592 +               if (words)
12593 +                       (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
12594 +       } else {
12595 +               append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12596 +                                OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
12597 +                                adata->keylen);
12598 +               append_ptr(desc, adata->key_dma);
12599 +       }
12600 +}
12601 +
12602  #endif /* DESC_CONSTR_H */
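
When the key is inline, append_proto_dkp() must leave room in the descriptor after the immediate key, because DKP overwrites it in place and the derived key (keylen_pad bytes) is longer than the input (keylen bytes); the header bump above reserves that extra room in command words. A worked standalone sketch of the arithmetic, assuming CAAM_CMD_SZ == 4:

#include <stdio.h>

#define CMD_SZ 4u	/* assumed CAAM_CMD_SZ */
#define ALIGN4(x) (((x) + CMD_SZ - 1) & ~(CMD_SZ - 1))

int main(void)
{
	unsigned int keylen = 20;	/* e.g. an SHA-1 HMAC key */
	unsigned int keylen_pad = 40;	/* padded split-key size */
	unsigned int words = (ALIGN4(keylen_pad) - ALIGN4(keylen)) / CMD_SZ;

	/* prints 5: five extra words reserved for the derived key */
	printf("reserve %u descriptor words\n", words);
	return 0;
}
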
12603 --- /dev/null
12604 +++ b/drivers/crypto/caam/dpseci.c
12605 @@ -0,0 +1,865 @@
12606 +/*
12607 + * Copyright 2013-2016 Freescale Semiconductor Inc.
12608 + * Copyright 2017 NXP
12609 + *
12610 + * Redistribution and use in source and binary forms, with or without
12611 + * modification, are permitted provided that the following conditions are met:
12612 + *     * Redistributions of source code must retain the above copyright
12613 + *      notice, this list of conditions and the following disclaimer.
12614 + *     * Redistributions in binary form must reproduce the above copyright
12615 + *      notice, this list of conditions and the following disclaimer in the
12616 + *      documentation and/or other materials provided with the distribution.
12617 + *     * Neither the names of the above-listed copyright holders nor the
12618 + *      names of any contributors may be used to endorse or promote products
12619 + *      derived from this software without specific prior written permission.
12620 + *
12621 + *
12622 + * ALTERNATIVELY, this software may be distributed under the terms of the
12623 + * GNU General Public License ("GPL") as published by the Free Software
12624 + * Foundation, either version 2 of that License or (at your option) any
12625 + * later version.
12626 + *
12627 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12628 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12629 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12630 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12631 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12632 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12633 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12634 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12635 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12636 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12637 + * POSSIBILITY OF SUCH DAMAGE.
12638 + */
12639 +
12640 +#include <linux/fsl/mc.h>
12641 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
12642 +#include "dpseci.h"
12643 +#include "dpseci_cmd.h"
12644 +
12645 +/**
12646 + * dpseci_open() - Open a control session for the specified object
12647 + * @mc_io:     Pointer to MC portal's I/O object
12648 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12649 + * @dpseci_id: DPSECI unique ID
12650 + * @token:     Returned token; use in subsequent API calls
12651 + *
12652 + * This function can be used to open a control session for an already created
12653 + * object; an object may have been declared in the DPL or by calling the
12654 + * dpseci_create() function.
12655 + * This function returns a unique authentication token, associated with the
12656 + * specific object ID and the specific MC portal; this token must be used in all
12657 + * subsequent commands for this specific object.
12658 + *
12659 + * Return:     '0' on success, error code otherwise
12660 + */
12661 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
12662 +               u16 *token)
12663 +{
12664 +       struct fsl_mc_command cmd = { 0 };
12665 +       struct dpseci_cmd_open *cmd_params;
12666 +       int err;
12667 +
12668 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
12669 +                                         cmd_flags,
12670 +                                         0);
12671 +       cmd_params = (struct dpseci_cmd_open *)cmd.params;
12672 +       cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
12673 +       err = mc_send_command(mc_io, &cmd);
12674 +       if (err)
12675 +               return err;
12676 +
12677 +       *token = mc_cmd_hdr_read_token(&cmd);
12678 +
12679 +       return 0;
12680 +}
12681 +
12682 +/**
12683 + * dpseci_close() - Close the control session of the object
12684 + * @mc_io:     Pointer to MC portal's I/O object
12685 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12686 + * @token:     Token of DPSECI object
12687 + *
12688 + * After this function is called, no further operations are allowed on the
12689 + * object without opening a new control session.
12690 + *
12691 + * Return:     '0' on success, error code otherwise
12692 + */
12693 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12694 +{
12695 +       struct fsl_mc_command cmd = { 0 };
12696 +
12697 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
12698 +                                         cmd_flags,
12699 +                                         token);
12700 +       return mc_send_command(mc_io, &cmd);
12701 +}
12702 +
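A typical control-session lifetime built from these two calls, sketched; 'mc_io' and 'dpseci_id' are assumed to come from the fsl-mc bus probe:

u16 token;
int err;

err = dpseci_open(mc_io, 0, dpseci_id, &token);
if (err)
        return err;

/* ... issue DPSECI commands against 'token' ... */

err = dpseci_close(mc_io, 0, token);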
12703 +/**
12704 + * dpseci_create() - Create the DPSECI object
12705 + * @mc_io:     Pointer to MC portal's I/O object
12706 + * @dprc_token:        Parent container token; '0' for default container
12707 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12708 + * @cfg:       Configuration structure
12709 + * @obj_id:    returned object id
12710 + *
12711 + * Create the DPSECI object, allocate required resources and perform required
12712 + * initialization.
12713 + *
12714 + * The object can be created either by declaring it in the DPL file, or by
12715 + * calling this function.
12716 + *
12717 + * The function accepts an authentication token of a parent container that this
12718 + * object should be assigned to. The token can be '0', in which case the
12719 + * object is assigned to the default container.
12720 + * The newly created object can be opened with the returned object id and using
12721 + * the container's associated tokens and MC portals.
12722 + *
12723 + * Return:     '0' on success, error code otherwise
12724 + */
12725 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12726 +                 const struct dpseci_cfg *cfg, u32 *obj_id)
12727 +{
12728 +       struct fsl_mc_command cmd = { 0 };
12729 +       struct dpseci_cmd_create *cmd_params;
12730 +       int i, err;
12731 +
12732 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
12733 +                                         cmd_flags,
12734 +                                         dprc_token);
12735 +       cmd_params = (struct dpseci_cmd_create *)cmd.params;
12736 +       for (i = 0; i < 8; i++)
12737 +               cmd_params->priorities[i] = cfg->priorities[i];
12738 +       for (i = 0; i < 8; i++)
12739 +               cmd_params->priorities2[i] = cfg->priorities[8 + i];
12740 +       cmd_params->num_tx_queues = cfg->num_tx_queues;
12741 +       cmd_params->num_rx_queues = cfg->num_rx_queues;
12742 +       cmd_params->options = cpu_to_le32(cfg->options);
12743 +       err = mc_send_command(mc_io, &cmd);
12744 +       if (err)
12745 +               return err;
12746 +
12747 +       *obj_id = mc_cmd_read_object_id(&cmd);
12748 +
12749 +       return 0;
12750 +}
12751 +
12752 +/**
12753 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
12754 + * @mc_io:     Pointer to MC portal's I/O object
12755 + * @dprc_token: Parent container token; '0' for default container
12756 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12757 + * @object_id: The object id; it must be a valid id within the container that
12758 + *             created this object
12759 + *
12760 + * The function accepts the authentication token of the parent container that
12761 + * created the object (not the one that currently owns the object). The object
12762 + * is searched for within the parent container using the provided 'object_id'.
12763 + * All tokens to the object must be closed before calling destroy.
12764 + *
12765 + * Return:     '0' on success, error code otherwise
12766 + */
12767 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12768 +                  u32 object_id)
12769 +{
12770 +       struct fsl_mc_command cmd = { 0 };
12771 +       struct dpseci_cmd_destroy *cmd_params;
12772 +
12773 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
12774 +                                         cmd_flags,
12775 +                                         dprc_token);
12776 +       cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
12777 +       cmd_params->object_id = cpu_to_le32(object_id);
12778 +
12779 +       return mc_send_command(mc_io, &cmd);
12780 +}
12781 +
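For reference, creating and later destroying an object programmatically (rather than declaring it in the DPL) could look like this sketch; the queue counts and priorities are illustrative:

struct dpseci_cfg cfg = {
        .options = DPSECI_OPT_HAS_CG,
        .num_tx_queues = 2,
        .num_rx_queues = 2,
        .priorities = { 1, 2 }, /* one priority per Tx queue, valid range 1-8 */
};
u32 obj_id;
int err;

err = dpseci_create(mc_io, 0 /* default container */, 0, &cfg, &obj_id);
if (err)
        return err;

/* ... after all control sessions on the object are closed ... */
err = dpseci_destroy(mc_io, 0, 0, obj_id);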
12782 +/**
12783 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
12784 + * @mc_io:     Pointer to MC portal's I/O object
12785 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12786 + * @token:     Token of DPSECI object
12787 + *
12788 + * Return:     '0' on success, error code otherwise
12789 + */
12790 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12791 +{
12792 +       struct fsl_mc_command cmd = { 0 };
12793 +
12794 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
12795 +                                         cmd_flags,
12796 +                                         token);
12797 +       return mc_send_command(mc_io, &cmd);
12798 +}
12799 +
12800 +/**
12801 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
12802 + * @mc_io:     Pointer to MC portal's I/O object
12803 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12804 + * @token:     Token of DPSECI object
12805 + *
12806 + * Return:     '0' on success, error code otherwise
12807 + */
12808 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12809 +{
12810 +       struct fsl_mc_command cmd = { 0 };
12811 +
12812 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
12813 +                                         cmd_flags,
12814 +                                         token);
12815 +
12816 +       return mc_send_command(mc_io, &cmd);
12817 +}
12818 +
12819 +/**
12820 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
12821 + * @mc_io:     Pointer to MC portal's I/O object
12822 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12823 + * @token:     Token of DPSECI object
12824 + * @en:                Returns '1' if object is enabled; '0' otherwise
12825 + *
12826 + * Return:     '0' on success, error code otherwise
12827 + */
12828 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12829 +                     int *en)
12830 +{
12831 +       struct fsl_mc_command cmd = { 0 };
12832 +       struct dpseci_rsp_is_enabled *rsp_params;
12833 +       int err;
12834 +
12835 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
12836 +                                         cmd_flags,
12837 +                                         token);
12838 +       err = mc_send_command(mc_io, &cmd);
12839 +       if (err)
12840 +               return err;
12841 +
12842 +       rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
12843 +       *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
12844 +
12845 +       return 0;
12846 +}
12847 +
12848 +/**
12849 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
12850 + * @mc_io:     Pointer to MC portal's I/O object
12851 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12852 + * @token:     Token of DPSECI object
12853 + *
12854 + * Return:     '0' on success, error code otherwise
12855 + */
12856 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12857 +{
12858 +       struct fsl_mc_command cmd = { 0 };
12859 +
12860 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
12861 +                                         cmd_flags,
12862 +                                         token);
12863 +
12864 +       return mc_send_command(mc_io, &cmd);
12865 +}
12866 +
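Enabling the object and double-checking the resulting state, as a sketch ('token' obtained from a prior dpseci_open()):

int en, err;

err = dpseci_enable(mc_io, 0, token);
if (err)
        return err;

err = dpseci_is_enabled(mc_io, 0, token, &en);
if (err)
        return err;
if (!en)
        return -ENODEV; /* illustrative error choice */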
12867 +/**
12868 + * dpseci_get_irq_enable() - Get overall interrupt state
12869 + * @mc_io:     Pointer to MC portal's I/O object
12870 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12871 + * @token:     Token of DPSECI object
12872 + * @irq_index: The interrupt index to configure
12873 + * @en:                Returned interrupt state - enable = 1, disable = 0
12874 + *
12875 + * Return:     '0' on success, error code otherwise
12876 + */
12877 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12878 +                         u8 irq_index, u8 *en)
12879 +{
12880 +       struct fsl_mc_command cmd = { 0 };
12881 +       struct dpseci_cmd_irq_enable *cmd_params;
12882 +       struct dpseci_rsp_get_irq_enable *rsp_params;
12883 +       int err;
12884 +
12885 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
12886 +                                         cmd_flags,
12887 +                                         token);
12888 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12889 +       cmd_params->irq_index = irq_index;
12890 +       err = mc_send_command(mc_io, &cmd);
12891 +       if (err)
12892 +               return err;
12893 +
12894 +       rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
12895 +       *en = rsp_params->enable_state;
12896 +
12897 +       return 0;
12898 +}
12899 +
12900 +/**
12901 + * dpseci_set_irq_enable() - Set overall interrupt state.
12902 + * @mc_io:     Pointer to MC portal's I/O object
12903 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12904 + * @token:     Token of DPSECI object
12905 + * @irq_index: The interrupt index to configure
12906 + * @en:                Interrupt state - enable = 1, disable = 0
12907 + *
12908 + * Allows GPP software to control when interrupts are generated.
12909 + * Each interrupt can have up to 32 causes. The enable/disable controls the
12910 + * overall interrupt state: if the interrupt is disabled, none of the causes
12911 + * can assert it.
12912 + *
12913 + * Return:     '0' on success, error code otherwise
12914 + */
12915 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12916 +                         u8 irq_index, u8 en)
12917 +{
12918 +       struct fsl_mc_command cmd = { 0 };
12919 +       struct dpseci_cmd_irq_enable *cmd_params;
12920 +
12921 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
12922 +                                         cmd_flags,
12923 +                                         token);
12924 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12925 +       cmd_params->irq_index = irq_index;
12926 +       cmd_params->enable_state = en;
12927 +
12928 +       return mc_send_command(mc_io, &cmd);
12929 +}
12930 +
12931 +/**
12932 + * dpseci_get_irq_mask() - Get interrupt mask.
12933 + * @mc_io:     Pointer to MC portal's I/O object
12934 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12935 + * @token:     Token of DPSECI object
12936 + * @irq_index: The interrupt index to configure
12937 + * @mask:      Returned event mask to trigger interrupt
12938 + *
12939 + * Every interrupt can have up to 32 causes and the interrupt model supports
12940 + * masking/unmasking each cause independently.
12941 + *
12942 + * Return:     '0' on success, error code otherwise
12943 + */
12944 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12945 +                       u8 irq_index, u32 *mask)
12946 +{
12947 +       struct fsl_mc_command cmd = { 0 };
12948 +       struct dpseci_cmd_irq_mask *cmd_params;
12949 +       int err;
12950 +
12951 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
12952 +                                         cmd_flags,
12953 +                                         token);
12954 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12955 +       cmd_params->irq_index = irq_index;
12956 +       err = mc_send_command(mc_io, &cmd);
12957 +       if (err)
12958 +               return err;
12959 +
12960 +       *mask = le32_to_cpu(cmd_params->mask);
12961 +
12962 +       return 0;
12963 +}
12964 +
12965 +/**
12966 + * dpseci_set_irq_mask() - Set interrupt mask.
12967 + * @mc_io:     Pointer to MC portal's I/O object
12968 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12969 + * @token:     Token of DPSECI object
12970 + * @irq_index: The interrupt index to configure
12971 + * @mask:      event mask to trigger interrupt;
12972 + *             each bit:
12973 + *                     0 = ignore event
12974 + *                     1 = consider event for asserting IRQ
12975 + *
12976 + * Every interrupt can have up to 32 causes and the interrupt model supports
12977 + * masking/unmasking each cause independently
12978 + *
12979 + * Return:     '0' on success, error code otherwise
12980 + */
12981 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12982 +                       u8 irq_index, u32 mask)
12983 +{
12984 +       struct fsl_mc_command cmd = { 0 };
12985 +       struct dpseci_cmd_irq_mask *cmd_params;
12986 +
12987 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
12988 +                                         cmd_flags,
12989 +                                         token);
12990 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12991 +       cmd_params->mask = cpu_to_le32(mask);
12992 +       cmd_params->irq_index = irq_index;
12993 +
12994 +       return mc_send_command(mc_io, &cmd);
12995 +}
12996 +
12997 +/**
12998 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
12999 + * @mc_io:     Pointer to MC portal's I/O object
13000 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13001 + * @token:     Token of DPSECI object
13002 + * @irq_index: The interrupt index to configure
13003 + * @status:    Returned interrupts status - one bit per cause:
13004 + *                     0 = no interrupt pending
13005 + *                     1 = interrupt pending
13006 + *
13007 + * Return:     '0' on success, error code otherwise
13008 + */
13009 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13010 +                         u8 irq_index, u32 *status)
13011 +{
13012 +       struct fsl_mc_command cmd = { 0 };
13013 +       struct dpseci_cmd_irq_status *cmd_params;
13014 +       int err;
13015 +
13016 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
13017 +                                         cmd_flags,
13018 +                                         token);
13019 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13020 +       cmd_params->status = cpu_to_le32(*status);
13021 +       cmd_params->irq_index = irq_index;
13022 +       err = mc_send_command(mc_io, &cmd);
13023 +       if (err)
13024 +               return err;
13025 +
13026 +       *status = le32_to_cpu(cmd_params->status);
13027 +
13028 +       return 0;
13029 +}
13030 +
13031 +/**
13032 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
13033 + * @mc_io:     Pointer to MC portal's I/O object
13034 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13035 + * @token:     Token of DPSECI object
13036 + * @irq_index: The interrupt index to configure
13037 + * @status:    bits to clear (W1C) - one bit per cause:
13038 + *                     0 = don't change
13039 + *                     1 = clear status bit
13040 + *
13041 + * Return:     '0' on success, error code otherwise
13042 + */
13043 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13044 +                           u8 irq_index, u32 status)
13045 +{
13046 +       struct fsl_mc_command cmd = { 0 };
13047 +       struct dpseci_cmd_irq_status *cmd_params;
13048 +
13049 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
13050 +                                         cmd_flags,
13051 +                                         token);
13052 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13053 +       cmd_params->status = cpu_to_le32(status);
13054 +       cmd_params->irq_index = irq_index;
13055 +
13056 +       return mc_send_command(mc_io, &cmd);
13057 +}
13058 +
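Taken together, the interrupt helpers support the usual unmask/enable/ack flow. A sketch for interrupt index 0 (the index value is an assumption; note that dpseci_get_irq_status() transmits the initial '*status' in the command, so it is zeroed first):

u32 status = 0;
int err;

/* unmask all 32 causes, then enable the interrupt line */
err = dpseci_set_irq_mask(mc_io, 0, token, 0, GENMASK(31, 0));
if (!err)
        err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
if (err)
        return err;

/* later: read pending causes and acknowledge them (W1C) */
err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
if (!err && status)
        err = dpseci_clear_irq_status(mc_io, 0, token, 0, status);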
13059 +/**
13060 + * dpseci_get_attributes() - Retrieve DPSECI attributes
13061 + * @mc_io:     Pointer to MC portal's I/O object
13062 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13063 + * @token:     Token of DPSECI object
13064 + * @attr:      Returned object's attributes
13065 + *
13066 + * Return:     '0' on success, error code otherwise
13067 + */
13068 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13069 +                         struct dpseci_attr *attr)
13070 +{
13071 +       struct fsl_mc_command cmd = { 0 };
13072 +       struct dpseci_rsp_get_attributes *rsp_params;
13073 +       int err;
13074 +
13075 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
13076 +                                         cmd_flags,
13077 +                                         token);
13078 +       err = mc_send_command(mc_io, &cmd);
13079 +       if (err)
13080 +               return err;
13081 +
13082 +       rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
13083 +       attr->id = le32_to_cpu(rsp_params->id);
13084 +       attr->num_tx_queues = rsp_params->num_tx_queues;
13085 +       attr->num_rx_queues = rsp_params->num_rx_queues;
13086 +       attr->options = le32_to_cpu(rsp_params->options);
13087 +
13088 +       return 0;
13089 +}
13090 +
13091 +/**
13092 + * dpseci_set_rx_queue() - Set Rx queue configuration
13093 + * @mc_io:     Pointer to MC portal's I/O object
13094 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13095 + * @token:     Token of DPSECI object
13096 + * @queue:     Select the queue relative to the number of priorities configured at
13097 + *             DPSECI creation; use DPSECI_ALL_QUEUES to configure all
13098 + *             Rx queues identically.
13099 + * @cfg:       Rx queue configuration
13100 + *
13101 + * Return:     '0' on success, error code otherwise
13102 + */
13103 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13104 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg)
13105 +{
13106 +       struct fsl_mc_command cmd = { 0 };
13107 +       struct dpseci_cmd_queue *cmd_params;
13108 +
13109 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
13110 +                                         cmd_flags,
13111 +                                         token);
13112 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13113 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13114 +       cmd_params->priority = cfg->dest_cfg.priority;
13115 +       cmd_params->queue = queue;
13116 +       dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
13117 +                        cfg->dest_cfg.dest_type);
13118 +       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
13119 +       cmd_params->options = cpu_to_le32(cfg->options);
13120 +       dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
13121 +                        cfg->order_preservation_en);
13122 +
13123 +       return mc_send_command(mc_io, &cmd);
13124 +}
13125 +
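A sketch of steering all Rx queues to a DPIO channel for FQDAN-driven dequeue; 'priv', 'dpio_id' and the priority value are illustrative caller-side assumptions:

struct dpseci_rx_queue_cfg rx_cfg = {
        .options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
        .user_ctx = (u64)(unsigned long)priv,   /* caller-defined cookie */
        .dest_cfg = {
                .dest_type = DPSECI_DEST_DPIO,
                .dest_id = dpio_id,
                .priority = 0,
        },
};
int err;

err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);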
13126 +/**
13127 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
13128 + * @mc_io:     Pointer to MC portal's I/O object
13129 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13130 + * @token:     Token of DPSECI object
13131 + * @queue:     Select the queue relative to the number of priorities configured at
13132 + *             DPSECI creation
13133 + * @attr:      Returned Rx queue attributes
13134 + *
13135 + * Return:     '0' on success, error code otherwise
13136 + */
13137 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13138 +                       u8 queue, struct dpseci_rx_queue_attr *attr)
13139 +{
13140 +       struct fsl_mc_command cmd = { 0 };
13141 +       struct dpseci_cmd_queue *cmd_params;
13142 +       int err;
13143 +
13144 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
13145 +                                         cmd_flags,
13146 +                                         token);
13147 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13148 +       cmd_params->queue = queue;
13149 +       err = mc_send_command(mc_io, &cmd);
13150 +       if (err)
13151 +               return err;
13152 +
13153 +       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
13154 +       attr->dest_cfg.priority = cmd_params->priority;
13155 +       attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
13156 +                                                   DEST_TYPE);
13157 +       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
13158 +       attr->fqid = le32_to_cpu(cmd_params->fqid);
13159 +       attr->order_preservation_en =
13160 +               dpseci_get_field(cmd_params->order_preservation_en,
13161 +                                ORDER_PRESERVATION);
13162 +
13163 +       return 0;
13164 +}
13165 +
13166 +/**
13167 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
13168 + * @mc_io:     Pointer to MC portal's I/O object
13169 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13170 + * @token:     Token of DPSECI object
13171 + * @queue:     Select the queue relative to the number of priorities configured at
13172 + *             DPSECI creation
13173 + * @attr:      Returned Tx queue attributes
13174 + *
13175 + * Return:     '0' on success, error code otherwise
13176 + */
13177 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13178 +                       u8 queue, struct dpseci_tx_queue_attr *attr)
13179 +{
13180 +       struct fsl_mc_command cmd = { 0 };
13181 +       struct dpseci_cmd_queue *cmd_params;
13182 +       struct dpseci_rsp_get_tx_queue *rsp_params;
13183 +       int err;
13184 +
13185 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
13186 +                                         cmd_flags,
13187 +                                         token);
13188 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13189 +       cmd_params->queue = queue;
13190 +       err = mc_send_command(mc_io, &cmd);
13191 +       if (err)
13192 +               return err;
13193 +
13194 +       rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
13195 +       attr->fqid = le32_to_cpu(rsp_params->fqid);
13196 +       attr->priority = rsp_params->priority;
13197 +
13198 +       return 0;
13199 +}
13200 +
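Combined with dpseci_get_attributes(), a caller can discover every Tx FQID, roughly:

struct dpseci_attr attr;
struct dpseci_tx_queue_attr tx_attr;
int i, err;

err = dpseci_get_attributes(mc_io, 0, token, &attr);
if (err)
        return err;

for (i = 0; i < attr.num_tx_queues; i++) {
        err = dpseci_get_tx_queue(mc_io, 0, token, i, &tx_attr);
        if (err)
                return err;
        /* tx_attr.fqid is the virtual FQID used to enqueue at this priority */
}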
13201 +/**
13202 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
13203 + * @mc_io:     Pointer to MC portal's I/O object
13204 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13205 + * @token:     Token of DPSECI object
13206 + * @attr:      Returned SEC attributes
13207 + *
13208 + * Return:     '0' on success, error code otherwise
13209 + */
13210 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13211 +                       struct dpseci_sec_attr *attr)
13212 +{
13213 +       struct fsl_mc_command cmd = { 0 };
13214 +       struct dpseci_rsp_get_sec_attr *rsp_params;
13215 +       int err;
13216 +
13217 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
13218 +                                         cmd_flags,
13219 +                                         token);
13220 +       err = mc_send_command(mc_io, &cmd);
13221 +       if (err)
13222 +               return err;
13223 +
13224 +       rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
13225 +       attr->ip_id = le16_to_cpu(rsp_params->ip_id);
13226 +       attr->major_rev = rsp_params->major_rev;
13227 +       attr->minor_rev = rsp_params->minor_rev;
13228 +       attr->era = rsp_params->era;
13229 +       attr->deco_num = rsp_params->deco_num;
13230 +       attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
13231 +       attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
13232 +       attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
13233 +       attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
13234 +       attr->crc_acc_num = rsp_params->crc_acc_num;
13235 +       attr->pk_acc_num = rsp_params->pk_acc_num;
13236 +       attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
13237 +       attr->rng_acc_num = rsp_params->rng_acc_num;
13238 +       attr->md_acc_num = rsp_params->md_acc_num;
13239 +       attr->arc4_acc_num = rsp_params->arc4_acc_num;
13240 +       attr->des_acc_num = rsp_params->des_acc_num;
13241 +       attr->aes_acc_num = rsp_params->aes_acc_num;
13242 +       attr->ccha_acc_num = rsp_params->ccha_acc_num;
13243 +       attr->ptha_acc_num = rsp_params->ptha_acc_num;
13244 +
13245 +       return 0;
13246 +}
13247 +
13248 +/**
13249 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
13250 + * @mc_io:     Pointer to MC portal's I/O object
13251 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13252 + * @token:     Token of DPSECI object
13253 + * @counters:  Returned SEC counters
13254 + *
13255 + * Return:     '0' on success, error code otherwise
13256 + */
13257 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13258 +                           struct dpseci_sec_counters *counters)
13259 +{
13260 +       struct fsl_mc_command cmd = { 0 };
13261 +       struct dpseci_rsp_get_sec_counters *rsp_params;
13262 +       int err;
13263 +
13264 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
13265 +                                         cmd_flags,
13266 +                                         token);
13267 +       err = mc_send_command(mc_io, &cmd);
13268 +       if (err)
13269 +               return err;
13270 +
13271 +       rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
13272 +       counters->dequeued_requests =
13273 +               le64_to_cpu(rsp_params->dequeued_requests);
13274 +       counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
13275 +       counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
13276 +       counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
13277 +       counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
13278 +       counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
13279 +       counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
13280 +
13281 +       return 0;
13282 +}
13283 +
13284 +/**
13285 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
13286 + * @mc_io:     Pointer to MC portal's I/O object
13287 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13288 + * @major_ver: Major version of data path sec API
13289 + * @minor_ver: Minor version of data path sec API
13290 + *
13291 + * Return:     '0' on success, error code otherwise
13292 + */
13293 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13294 +                          u16 *major_ver, u16 *minor_ver)
13295 +{
13296 +       struct fsl_mc_command cmd = { 0 };
13297 +       struct dpseci_rsp_get_api_version *rsp_params;
13298 +       int err;
13299 +
13300 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
13301 +                                         cmd_flags, 0);
13302 +       err = mc_send_command(mc_io, &cmd);
13303 +       if (err)
13304 +               return err;
13305 +
13306 +       rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
13307 +       *major_ver = le16_to_cpu(rsp_params->major);
13308 +       *minor_ver = le16_to_cpu(rsp_params->minor);
13309 +
13310 +       return 0;
13311 +}
13312 +
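Drivers typically gate themselves on the firmware API version; a sketch using the DPSECI_VER_* constants that dpseci_cmd.h defines later in this patch:

u16 major, minor;
int err;

err = dpseci_get_api_version(mc_io, 0, &major, &minor);
if (err)
        return err;

if (major < DPSECI_VER_MAJOR ||
    (major == DPSECI_VER_MAJOR && minor < DPSECI_VER_MINOR))
        return -ENOTSUPP;       /* illustrative: MC firmware too old */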
13313 +/**
13314 + * dpseci_set_opr() - Set Order Restoration configuration
13315 + * @mc_io:     Pointer to MC portal's I/O object
13316 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13317 + * @token:     Token of DPSECI object
13318 + * @index:     The queue index
13319 + * @options:   Configuration mode options; can be OPR_OPT_CREATE or
13320 + *             OPR_OPT_RETIRE
13321 + * @cfg:       Configuration options for the OPR
13322 + *
13323 + * Return:     '0' on success, error code otherwise
13324 + */
13325 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13326 +                  u8 options, struct opr_cfg *cfg)
13327 +{
13328 +       struct fsl_mc_command cmd = { 0 };
13329 +       struct dpseci_cmd_opr *cmd_params;
13330 +
13331 +       cmd.header = mc_encode_cmd_header(
13332 +                       DPSECI_CMDID_SET_OPR,
13333 +                       cmd_flags,
13334 +                       token);
13335 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13336 +       cmd_params->index = index;
13337 +       cmd_params->options = options;
13338 +       cmd_params->oloe = cfg->oloe;
13339 +       cmd_params->oeane = cfg->oeane;
13340 +       cmd_params->olws = cfg->olws;
13341 +       cmd_params->oa = cfg->oa;
13342 +       cmd_params->oprrws = cfg->oprrws;
13343 +
13344 +       return mc_send_command(mc_io, &cmd);
13345 +}
13346 +
13347 +/**
13348 + * dpseci_get_opr() - Retrieve Order Restoration configuration and query state
13349 + * @mc_io:     Pointer to MC portal's I/O object
13350 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13351 + * @token:     Token of DPSECI object
13352 + * @index:     The queue index
13353 + * @cfg:       Returned OPR configuration
13354 + * @qry:       Returned OPR query
13355 + *
13356 + * Return:     '0' on success, error code otherwise
13357 + */
13358 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13359 +                  struct opr_cfg *cfg, struct opr_qry *qry)
13360 +{
13361 +       struct fsl_mc_command cmd = { 0 };
13362 +       struct dpseci_cmd_opr *cmd_params;
13363 +       struct dpseci_rsp_get_opr *rsp_params;
13364 +       int err;
13365 +
13366 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
13367 +                                         cmd_flags,
13368 +                                         token);
13369 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13370 +       cmd_params->index = index;
13371 +       err = mc_send_command(mc_io, &cmd);
13372 +       if (err)
13373 +               return err;
13374 +
13375 +       rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
13376 +       qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
13377 +       qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
13378 +       cfg->oloe = rsp_params->oloe;
13379 +       cfg->oeane = rsp_params->oeane;
13380 +       cfg->olws = rsp_params->olws;
13381 +       cfg->oa = rsp_params->oa;
13382 +       cfg->oprrws = rsp_params->oprrws;
13383 +       qry->nesn = le16_to_cpu(rsp_params->nesn);
13384 +       qry->ndsn = le16_to_cpu(rsp_params->ndsn);
13385 +       qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
13386 +       qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
13387 +       qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
13388 +       qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
13389 +       qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
13390 +       qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
13391 +       qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
13392 +       qry->opr_id = le16_to_cpu(rsp_params->opr_id);
13393 +
13394 +       return 0;
13395 +}
13396 +
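Order restoration setup, sketched; the struct opr_cfg field values are illustrative, and OPR_OPT_CREATE comes from the dpopr.h header included by dpseci.c:

struct opr_cfg opr_cfg = {
        .oprrws = 3,    /* illustrative restoration-window size selector */
        .oa = 0,
        .olws = 0,
        .oeane = 0,
        .oloe = 0,
};
int err;

err = dpseci_set_opr(mc_io, 0, token, 0 /* queue index */,
                     OPR_OPT_CREATE, &opr_cfg);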
13397 +/**
13398 + * dpseci_set_congestion_notification() - Set congestion group
13399 + *     notification configuration
13400 + * @mc_io:     Pointer to MC portal's I/O object
13401 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13402 + * @token:     Token of DPSECI object
13403 + * @cfg:       congestion notification configuration
13404 + *
13405 + * Return:     '0' on success, error code otherwise
13406 + */
13407 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13408 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg)
13409 +{
13410 +       struct fsl_mc_command cmd = { 0 };
13411 +       struct dpseci_cmd_congestion_notification *cmd_params;
13412 +
13413 +       cmd.header = mc_encode_cmd_header(
13414 +                       DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
13415 +                       cmd_flags,
13416 +                       token);
13417 +       cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13418 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13419 +       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
13420 +       cmd_params->priority = cfg->dest_cfg.priority;
13421 +       dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
13422 +                        cfg->dest_cfg.dest_type);
13423 +       dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
13424 +       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
13425 +       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
13426 +       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
13427 +       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
13428 +
13429 +       return mc_send_command(mc_io, &cmd);
13430 +}
13431 +
13432 +/**
13433 + * dpseci_get_congestion_notification() - Get congestion group notification
13434 + *     configuration
13435 + * @mc_io:     Pointer to MC portal's I/O object
13436 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13437 + * @token:     Token of DPSECI object
13438 + * @cfg:       congestion notification configuration
13439 + *
13440 + * Return:     '0' on success, error code otherwise
13441 + */
13442 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13443 +       u16 token, struct dpseci_congestion_notification_cfg *cfg)
13444 +{
13445 +       struct fsl_mc_command cmd = { 0 };
13446 +       struct dpseci_cmd_congestion_notification *rsp_params;
13447 +       int err;
13448 +
13449 +       cmd.header = mc_encode_cmd_header(
13450 +                       DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
13451 +                       cmd_flags,
13452 +                       token);
13453 +       err = mc_send_command(mc_io, &cmd);
13454 +       if (err)
13455 +               return err;
13456 +
13457 +       rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13458 +       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
13459 +       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
13460 +       cfg->dest_cfg.priority = rsp_params->priority;
13461 +       cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
13462 +                                                  CGN_DEST_TYPE);
13463 +       cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
13464 +       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
13465 +       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
13466 +       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
13467 +       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
13468 +
13469 +       return 0;
13470 +}
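A sketch of requesting memory-written CSCN messages on congestion entry and exit, with frame-count thresholds; the threshold values and the 16B-aligned, DMA-able 'cscn_iova' buffer are assumptions:

struct dpseci_congestion_notification_cfg cgn_cfg = {
        .units = DPSECI_CONGESTION_UNIT_FRAMES,
        .threshold_entry = 1024,
        .threshold_exit = 512,
        .message_iova = cscn_iova,
        .message_ctx = 0,
        .notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
                             DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
};
int err;

err = dpseci_set_congestion_notification(mc_io, 0, token, &cgn_cfg);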
13471 --- /dev/null
13472 +++ b/drivers/crypto/caam/dpseci.h
13473 @@ -0,0 +1,433 @@
13474 +/*
13475 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13476 + * Copyright 2017 NXP
13477 + *
13478 + * Redistribution and use in source and binary forms, with or without
13479 + * modification, are permitted provided that the following conditions are met:
13480 + *     * Redistributions of source code must retain the above copyright
13481 + *      notice, this list of conditions and the following disclaimer.
13482 + *     * Redistributions in binary form must reproduce the above copyright
13483 + *      notice, this list of conditions and the following disclaimer in the
13484 + *      documentation and/or other materials provided with the distribution.
13485 + *     * Neither the names of the above-listed copyright holders nor the
13486 + *      names of any contributors may be used to endorse or promote products
13487 + *      derived from this software without specific prior written permission.
13488 + *
13489 + *
13490 + * ALTERNATIVELY, this software may be distributed under the terms of the
13491 + * GNU General Public License ("GPL") as published by the Free Software
13492 + * Foundation, either version 2 of that License or (at your option) any
13493 + * later version.
13494 + *
13495 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13496 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13497 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13498 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13499 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13500 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13501 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13502 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13503 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13504 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13505 + * POSSIBILITY OF SUCH DAMAGE.
13506 + */
13507 +#ifndef _DPSECI_H_
13508 +#define _DPSECI_H_
13509 +
13510 +/*
13511 + * Data Path SEC Interface API
13512 + * Contains initialization APIs and runtime control APIs for DPSECI
13513 + */
13514 +
13515 +struct fsl_mc_io;
13516 +struct opr_cfg;
13517 +struct opr_qry;
13518 +
13519 +/**
13520 + * General DPSECI macros
13521 + */
13522 +
13523 +/**
13524 + * Maximum number of Tx/Rx queues per DPSECI object
13525 + */
13526 +#define DPSECI_MAX_QUEUE_NUM           16
13527 +
13528 +/**
13529 + * All queues considered; see dpseci_set_rx_queue()
13530 + */
13531 +#define DPSECI_ALL_QUEUES      (u8)(-1)
13532 +
13533 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
13534 +               u16 *token);
13535 +
13536 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13537 +
13538 +/**
13539 + * Enable the Congestion Group support
13540 + */
13541 +#define DPSECI_OPT_HAS_CG              0x000020
13542 +
13543 +/**
13544 + * Enable the Order Restoration support
13545 + */
13546 +#define DPSECI_OPT_HAS_OPR             0x000040
13547 +
13548 +/**
13549 + * Order Point Records are shared for the entire DPSECI
13550 + */
13551 +#define DPSECI_OPT_OPR_SHARED          0x000080
13552 +
13553 +/**
13554 + * struct dpseci_cfg - Structure representing DPSECI configuration
13555 + * @options: Any combination of the following options:
13556 + *             DPSECI_OPT_HAS_CG
13557 + *             DPSECI_OPT_HAS_OPR
13558 + *             DPSECI_OPT_OPR_SHARED
13559 + * @num_tx_queues: num of queues towards the SEC
13560 + * @num_rx_queues: num of queues back from the SEC
13561 + * @priorities: Priorities for the SEC hardware processing;
13562 + *             each entry in the array is the priority of the Tx queue
13563 + *             towards the SEC;
13564 + *             valid priorities are 1-8
13565 + */
13566 +struct dpseci_cfg {
13567 +       u32 options;
13568 +       u8 num_tx_queues;
13569 +       u8 num_rx_queues;
13570 +       u8 priorities[DPSECI_MAX_QUEUE_NUM];
13571 +};
13572 +
13573 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13574 +                 const struct dpseci_cfg *cfg, u32 *obj_id);
13575 +
13576 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13577 +                  u32 object_id);
13578 +
13579 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13580 +
13581 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13582 +
13583 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13584 +                     int *en);
13585 +
13586 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13587 +
13588 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13589 +                         u8 irq_index, u8 *en);
13590 +
13591 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13592 +                         u8 irq_index, u8 en);
13593 +
13594 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13595 +                       u8 irq_index, u32 *mask);
13596 +
13597 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13598 +                       u8 irq_index, u32 mask);
13599 +
13600 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13601 +                         u8 irq_index, u32 *status);
13602 +
13603 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13604 +                           u8 irq_index, u32 status);
13605 +
13606 +/**
13607 + * struct dpseci_attr - Structure representing DPSECI attributes
13608 + * @id: DPSECI object ID
13609 + * @num_tx_queues: number of queues towards the SEC
13610 + * @num_rx_queues: number of queues back from the SEC
13611 + * @options: any combination of the following options:
13612 + *             DPSECI_OPT_HAS_CG
13613 + *             DPSECI_OPT_HAS_OPR
13614 + *             DPSECI_OPT_OPR_SHARED
13615 + */
13616 +struct dpseci_attr {
13617 +       int id;
13618 +       u8 num_tx_queues;
13619 +       u8 num_rx_queues;
13620 +       u32 options;
13621 +};
13622 +
13623 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13624 +                         struct dpseci_attr *attr);
13625 +
13626 +/**
13627 + * enum dpseci_dest - DPSECI destination types
13628 + * @DPSECI_DEST_NONE: Unassigned destination; the queue is set in parked mode
13629 + *     and does not generate FQDAN notifications; user is expected to dequeue
13630 + *     from the queue based on polling or other user-defined method
13631 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
13632 + *     notifications to the specified DPIO; user is expected to dequeue from
13633 + *     the queue only after notification is received
13634 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
13635 + *     FQDAN notifications, but is connected to the specified DPCON object;
13636 + *     user is expected to dequeue from the DPCON channel
13637 + */
13638 +enum dpseci_dest {
13639 +       DPSECI_DEST_NONE = 0,
13640 +       DPSECI_DEST_DPIO,
13641 +       DPSECI_DEST_DPCON
13642 +};
13643 +
13644 +/**
13645 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
13646 + * @dest_type: Destination type
13647 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
13648 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
13649 + *     are 0-1 or 0-7, depending on the number of priorities in that channel;
13650 + *     not relevant for 'DPSECI_DEST_NONE' option
13651 + */
13652 +struct dpseci_dest_cfg {
13653 +       enum dpseci_dest dest_type;
13654 +       int dest_id;
13655 +       u8 priority;
13656 +};
13657 +
13658 +/**
13659 + * DPSECI queue modification options
13660 + */
13661 +
13662 +/**
13663 + * Select to modify the user's context associated with the queue
13664 + */
13665 +#define DPSECI_QUEUE_OPT_USER_CTX              0x00000001
13666 +
13667 +/**
13668 + * Select to modify the queue's destination
13669 + */
13670 +#define DPSECI_QUEUE_OPT_DEST                  0x00000002
13671 +
13672 +/**
13673 + * Select to modify the queue's order preservation
13674 + */
13675 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION    0x00000004
13676 +
13677 +/**
13678 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
13679 + * @options: Flags representing the suggested modifications to the queue;
13680 + *     Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
13681 + * @order_preservation_en: order preservation configuration for the Rx queue;
13682 + *     valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
13683 + * @user_ctx: User context value provided in the frame descriptor of each
13684 + *     dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
13685 + *     in 'options'
13686 + * @dest_cfg: Queue destination parameters; valid only if
13687 + *     'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
13688 + */
13689 +struct dpseci_rx_queue_cfg {
13690 +       u32 options;
13691 +       int order_preservation_en;
13692 +       u64 user_ctx;
13693 +       struct dpseci_dest_cfg dest_cfg;
13694 +};
13695 +
13696 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13697 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg);
13698 +
13699 +/**
13700 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
13701 + * @user_ctx: User context value provided in the frame descriptor of each
13702 + *     dequeued frame
13703 + * @order_preservation_en: Status of the order preservation configuration on the
13704 + *     queue
13705 + * @dest_cfg: Queue destination configuration
13706 + * @fqid: Virtual FQID value to be used for dequeue operations
13707 + */
13708 +struct dpseci_rx_queue_attr {
13709 +       u64 user_ctx;
13710 +       int order_preservation_en;
13711 +       struct dpseci_dest_cfg dest_cfg;
13712 +       u32 fqid;
13713 +};
13714 +
13715 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13716 +                       u8 queue, struct dpseci_rx_queue_attr *attr);
13717 +
13718 +/**
13719 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
13720 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
13721 + * @priority: SEC hardware processing priority for the queue
13722 + */
13723 +struct dpseci_tx_queue_attr {
13724 +       u32 fqid;
13725 +       u8 priority;
13726 +};
13727 +
13728 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13729 +                       u8 queue, struct dpseci_tx_queue_attr *attr);
13730 +
13731 +/**
13732 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
13733 + *     hardware accelerator
13734 + * @ip_id: ID for SEC
13735 + * @major_rev: Major revision number for SEC
13736 + * @minor_rev: Minor revision number for SEC
13737 + * @era: SEC Era
13738 + * @deco_num: The number of copies of the DECO that are implemented in this
13739 + *     version of SEC
13740 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
13741 + *     version of SEC
13742 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
13743 + *     version of SEC
13744 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
13745 + *     implemented in this version of SEC
13746 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
13747 + *     implemented in this version of SEC
13748 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
13749 + *     this version of SEC
13750 + * @pk_acc_num:  The number of copies of the Public Key module that are
13751 + *     implemented in this version of SEC
13752 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
13753 + *     implemented in this version of SEC
13754 + * @rng_acc_num: The number of copies of the Random Number Generator that are
13755 + *     implemented in this version of SEC
13756 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
13757 + *     implemented in this version of SEC
13758 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
13759 + *     in this version of SEC
13760 + * @des_acc_num: The number of copies of the DES module that are implemented in
13761 + *     this version of SEC
13762 + * @aes_acc_num: The number of copies of the AES module that are implemented in
13763 + *     this version of SEC
13764 + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
13765 + *     implemented in this version of SEC.
13766 + * @ptha_acc_num: The number of copies of the Poly1305 module that are
13767 + *     implemented in this version of SEC.
13768 + **/
13769 +struct dpseci_sec_attr {
13770 +       u16 ip_id;
13771 +       u8 major_rev;
13772 +       u8 minor_rev;
13773 +       u8 era;
13774 +       u8 deco_num;
13775 +       u8 zuc_auth_acc_num;
13776 +       u8 zuc_enc_acc_num;
13777 +       u8 snow_f8_acc_num;
13778 +       u8 snow_f9_acc_num;
13779 +       u8 crc_acc_num;
13780 +       u8 pk_acc_num;
13781 +       u8 kasumi_acc_num;
13782 +       u8 rng_acc_num;
13783 +       u8 md_acc_num;
13784 +       u8 arc4_acc_num;
13785 +       u8 des_acc_num;
13786 +       u8 aes_acc_num;
13787 +       u8 ccha_acc_num;
13788 +       u8 ptha_acc_num;
13789 +};
13790 +
13791 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13792 +                       struct dpseci_sec_attr *attr);
13793 +
13794 +/**
13795 + * struct dpseci_sec_counters - Structure representing global SEC counters
13796 + *                             (not per-DPSECI counters)
13797 + * @dequeued_requests: Number of Requests Dequeued
13798 + * @ob_enc_requests:   Number of Outbound Encrypt Requests
13799 + * @ib_dec_requests:   Number of Inbound Decrypt Requests
13800 + * @ob_enc_bytes:      Number of Outbound Bytes Encrypted
13801 + * @ob_prot_bytes:     Number of Outbound Bytes Protected
13802 + * @ib_dec_bytes:      Number of Inbound Bytes Decrypted
13803 + * @ib_valid_bytes:    Number of Inbound Bytes Validated
13804 + */
13805 +struct dpseci_sec_counters {
13806 +       u64 dequeued_requests;
13807 +       u64 ob_enc_requests;
13808 +       u64 ib_dec_requests;
13809 +       u64 ob_enc_bytes;
13810 +       u64 ob_prot_bytes;
13811 +       u64 ib_dec_bytes;
13812 +       u64 ib_valid_bytes;
13813 +};
13814 +
13815 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13816 +                           struct dpseci_sec_counters *counters);
13817 +
13818 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13819 +                          u16 *major_ver, u16 *minor_ver);
13820 +
13821 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13822 +                  u8 options, struct opr_cfg *cfg);
13823 +
13824 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13825 +                  struct opr_cfg *cfg, struct opr_qry *qry);
13826 +
13827 +/**
13828 + * enum dpseci_congestion_unit - DPSECI congestion units
13829 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
13830 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
13831 + */
13832 +enum dpseci_congestion_unit {
13833 +       DPSECI_CONGESTION_UNIT_BYTES = 0,
13834 +       DPSECI_CONGESTION_UNIT_FRAMES
13835 +};
13836 +
13837 +/**
13838 + * CSCN message is written to message_iova when entering a
13839 + * congestion state (see 'threshold_entry')
13840 + */
13841 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER             0x00000001
13842 +
13843 +/**
13844 + * CSCN message is written to message_iova when exiting a
13845 + * congestion state (see 'threshold_exit')
13846 + */
13847 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT              0x00000002
13848 +
13849 +/**
13850 + * CSCN write will attempt to allocate into a cache (coherent write);
13851 + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
13852 + */
13853 +#define DPSECI_CGN_MODE_COHERENT_WRITE                 0x00000004
13854 +
13855 +/**
13856 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13857 + * DPIO/DPCON's WQ channel when entering a congestion state
13858 + * (see 'threshold_entry')
13859 + */
13860 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER           0x00000008
13861 +
13862 +/**
13863 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13864 + * DPIO/DPCON's WQ channel when exiting a congestion state
13865 + * (see 'threshold_exit')
13866 + */
13867 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT            0x00000010
13868 +
13869 +/**
13870 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
13871 + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
13872 + * (if enabled)
13873 + */
13874 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED       0x00000020
13875 +
13876 +/**
13877 + * struct dpseci_congestion_notification_cfg - congestion notification
13878 + *     configuration
13879 + * @units: units type
13880 + * @threshold_entry: above this threshold we enter a congestion state.
13881 + *     set it to '0' to disable it
13882 + * @threshold_exit: below this threshold we exit the congestion state.
13883 + * @message_ctx: The context that will be part of the CSCN message
13884 + * @message_iova: I/O virtual address (must be in DMA-able memory),
13885 + *     must be 16B aligned
13886 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
13887 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
13888 + *     values
13889 + */
13890 +struct dpseci_congestion_notification_cfg {
13891 +       enum dpseci_congestion_unit units;
13892 +       u32 threshold_entry;
13893 +       u32 threshold_exit;
13894 +       u64 message_ctx;
13895 +       u64 message_iova;
13896 +       struct dpseci_dest_cfg dest_cfg;
13897 +       u16 notification_mode;
13898 +};
13899 +
13900 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13901 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg);
13902 +
13903 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13904 +       u16 token, struct dpseci_congestion_notification_cfg *cfg);
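A configuration sketch for the pair of calls above (hypothetical helper with
illustrative thresholds; 'iova' must reference 16B-aligned, DMA-able memory,
and dest_cfg is left zeroed, i.e. no WQ channel notification):

static int setup_cscn(struct fsl_mc_io *mc_io, u16 token, u64 iova)
{
	struct dpseci_congestion_notification_cfg cfg = {
		.units = DPSECI_CONGESTION_UNIT_BYTES,
		.threshold_entry = SZ_1M,	/* enter congestion at 1 MiB */
		.threshold_exit = SZ_512K,	/* leave it below 512 KiB */
		.message_iova = iova,
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
	};

	return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
}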
13905 +
13906 +#endif /* _DPSECI_H_ */
13907 --- /dev/null
13908 +++ b/drivers/crypto/caam/dpseci_cmd.h
13909 @@ -0,0 +1,287 @@
13910 +/*
13911 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13912 + * Copyright 2017 NXP
13913 + *
13914 + * Redistribution and use in source and binary forms, with or without
13915 + * modification, are permitted provided that the following conditions are met:
13916 + *     * Redistributions of source code must retain the above copyright
13917 + *      notice, this list of conditions and the following disclaimer.
13918 + *     * Redistributions in binary form must reproduce the above copyright
13919 + *      notice, this list of conditions and the following disclaimer in the
13920 + *      documentation and/or other materials provided with the distribution.
13921 + *     * Neither the names of the above-listed copyright holders nor the
13922 + *      names of any contributors may be used to endorse or promote products
13923 + *      derived from this software without specific prior written permission.
13924 + *
13925 + *
13926 + * ALTERNATIVELY, this software may be distributed under the terms of the
13927 + * GNU General Public License ("GPL") as published by the Free Software
13928 + * Foundation, either version 2 of that License or (at your option) any
13929 + * later version.
13930 + *
13931 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13932 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13933 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13934 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13935 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13936 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13937 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13938 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13939 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13940 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13941 + * POSSIBILITY OF SUCH DAMAGE.
13942 + */
13943 +
13944 +#ifndef _DPSECI_CMD_H_
13945 +#define _DPSECI_CMD_H_
13946 +
13947 +/* DPSECI Version */
13948 +#define DPSECI_VER_MAJOR                               5
13949 +#define DPSECI_VER_MINOR                               3
13950 +
13951 +#define DPSECI_VER(maj, min)   (((maj) << 16) | (min))
13952 +#define DPSECI_VERSION         DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
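For reference, the packed value works out as:

/* DPSECI_VERSION = DPSECI_VER(5, 3) = (5 << 16) | 3 = 0x00050003 */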
13953 +
13954 +/* Command versioning */
13955 +#define DPSECI_CMD_BASE_VERSION                1
13956 +#define DPSECI_CMD_BASE_VERSION_V2     2
13957 +#define DPSECI_CMD_BASE_VERSION_V3     3
13958 +#define DPSECI_CMD_ID_OFFSET           4
13959 +
13960 +#define DPSECI_CMD_V1(id)      (((id) << DPSECI_CMD_ID_OFFSET) | \
13961 +                                DPSECI_CMD_BASE_VERSION)
13962 +
13963 +#define DPSECI_CMD_V2(id)      (((id) << DPSECI_CMD_ID_OFFSET) | \
13964 +                                DPSECI_CMD_BASE_VERSION_V2)
13965 +
13966 +#define DPSECI_CMD_V3(id)      (((id) << DPSECI_CMD_ID_OFFSET) | \
13967 +                                DPSECI_CMD_BASE_VERSION_V3)
13968 +
13969 +/* Command IDs */
13970 +#define DPSECI_CMDID_CLOSE                             DPSECI_CMD_V1(0x800)
13971 +#define DPSECI_CMDID_OPEN                              DPSECI_CMD_V1(0x809)
13972 +#define DPSECI_CMDID_CREATE                            DPSECI_CMD_V3(0x909)
13973 +#define DPSECI_CMDID_DESTROY                           DPSECI_CMD_V1(0x989)
13974 +#define DPSECI_CMDID_GET_API_VERSION                   DPSECI_CMD_V1(0xa09)
13975 +
13976 +#define DPSECI_CMDID_ENABLE                            DPSECI_CMD_V1(0x002)
13977 +#define DPSECI_CMDID_DISABLE                           DPSECI_CMD_V1(0x003)
13978 +#define DPSECI_CMDID_GET_ATTR                          DPSECI_CMD_V1(0x004)
13979 +#define DPSECI_CMDID_RESET                             DPSECI_CMD_V1(0x005)
13980 +#define DPSECI_CMDID_IS_ENABLED                                DPSECI_CMD_V1(0x006)
13981 +
13982 +#define DPSECI_CMDID_SET_IRQ_ENABLE                    DPSECI_CMD_V1(0x012)
13983 +#define DPSECI_CMDID_GET_IRQ_ENABLE                    DPSECI_CMD_V1(0x013)
13984 +#define DPSECI_CMDID_SET_IRQ_MASK                      DPSECI_CMD_V1(0x014)
13985 +#define DPSECI_CMDID_GET_IRQ_MASK                      DPSECI_CMD_V1(0x015)
13986 +#define DPSECI_CMDID_GET_IRQ_STATUS                    DPSECI_CMD_V1(0x016)
13987 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS                  DPSECI_CMD_V1(0x017)
13988 +
13989 +#define DPSECI_CMDID_SET_RX_QUEUE                      DPSECI_CMD_V1(0x194)
13990 +#define DPSECI_CMDID_GET_RX_QUEUE                      DPSECI_CMD_V1(0x196)
13991 +#define DPSECI_CMDID_GET_TX_QUEUE                      DPSECI_CMD_V1(0x197)
13992 +#define DPSECI_CMDID_GET_SEC_ATTR                      DPSECI_CMD_V2(0x198)
13993 +#define DPSECI_CMDID_GET_SEC_COUNTERS                  DPSECI_CMD_V1(0x199)
13994 +#define DPSECI_CMDID_SET_OPR                           DPSECI_CMD_V1(0x19A)
13995 +#define DPSECI_CMDID_GET_OPR                           DPSECI_CMD_V1(0x19B)
13996 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION       DPSECI_CMD_V1(0x170)
13997 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION       DPSECI_CMD_V1(0x171)
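A worked example of the encoding (command number in the upper bits, base
version in the low nibble):

/*
 * DPSECI_CMDID_OPEN   = DPSECI_CMD_V1(0x809) = (0x809 << 4) | 1 = 0x8091
 * DPSECI_CMDID_CREATE = DPSECI_CMD_V3(0x909) = (0x909 << 4) | 3 = 0x9093
 */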
13998 +
13999 +/* Macros for accessing command fields smaller than 1 byte */
14000 +#define DPSECI_MASK(field)     \
14001 +       GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1,     \
14002 +               DPSECI_##field##_SHIFT)
14003 +
14004 +#define dpseci_set_field(var, field, val)      \
14005 +       ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
14006 +
14007 +#define dpseci_get_field(var, field)   \
14008 +       (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
14009 +
14010 +struct dpseci_cmd_open {
14011 +       __le32 dpseci_id;
14012 +};
14013 +
14014 +struct dpseci_cmd_create {
14015 +       u8 priorities[8];
14016 +       u8 num_tx_queues;
14017 +       u8 num_rx_queues;
14018 +       u8 pad0[6];
14019 +       __le32 options;
14020 +       __le32 pad1;
14021 +       u8 priorities2[8];
14022 +};
14023 +
14024 +struct dpseci_cmd_destroy {
14025 +       __le32 object_id;
14026 +};
14027 +
14028 +#define DPSECI_ENABLE_SHIFT    0
14029 +#define DPSECI_ENABLE_SIZE     1
14030 +
14031 +struct dpseci_rsp_is_enabled {
14032 +       u8 is_enabled;
14033 +};
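The field helpers above exist for exactly this kind of sub-byte flag; a
sketch (hypothetical helper) of pulling the enable bit out of an IS_ENABLED
response:

static inline bool dpseci_rsp_enabled(const struct dpseci_rsp_is_enabled *rsp)
{
	/* expands to (is_enabled & GENMASK(0, 0)) >> 0 */
	return dpseci_get_field(rsp->is_enabled, ENABLE);
}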
14034 +
14035 +struct dpseci_cmd_irq_enable {
14036 +       u8 enable_state;
14037 +       u8 pad[3];
14038 +       u8 irq_index;
14039 +};
14040 +
14041 +struct dpseci_rsp_get_irq_enable {
14042 +       u8 enable_state;
14043 +};
14044 +
14045 +struct dpseci_cmd_irq_mask {
14046 +       __le32 mask;
14047 +       u8 irq_index;
14048 +};
14049 +
14050 +struct dpseci_cmd_irq_status {
14051 +       __le32 status;
14052 +       u8 irq_index;
14053 +};
14054 +
14055 +struct dpseci_rsp_get_attributes {
14056 +       __le32 id;
14057 +       __le32 pad0;
14058 +       u8 num_tx_queues;
14059 +       u8 num_rx_queues;
14060 +       u8 pad1[6];
14061 +       __le32 options;
14062 +};
14063 +
14064 +#define DPSECI_DEST_TYPE_SHIFT 0
14065 +#define DPSECI_DEST_TYPE_SIZE  4
14066 +
14067 +#define DPSECI_ORDER_PRESERVATION_SHIFT        0
14068 +#define DPSECI_ORDER_PRESERVATION_SIZE 1
14069 +
14070 +struct dpseci_cmd_queue {
14071 +       __le32 dest_id;
14072 +       u8 priority;
14073 +       u8 queue;
14074 +       u8 dest_type;
14075 +       u8 pad;
14076 +       __le64 user_ctx;
14077 +       union {
14078 +               __le32 options;
14079 +               __le32 fqid;
14080 +       };
14081 +       u8 order_preservation_en;
14082 +};
14083 +
14084 +struct dpseci_rsp_get_tx_queue {
14085 +       __le32 pad;
14086 +       __le32 fqid;
14087 +       u8 priority;
14088 +};
14089 +
14090 +struct dpseci_rsp_get_sec_attr {
14091 +       __le16 ip_id;
14092 +       u8 major_rev;
14093 +       u8 minor_rev;
14094 +       u8 era;
14095 +       u8 pad0[3];
14096 +       u8 deco_num;
14097 +       u8 zuc_auth_acc_num;
14098 +       u8 zuc_enc_acc_num;
14099 +       u8 pad1;
14100 +       u8 snow_f8_acc_num;
14101 +       u8 snow_f9_acc_num;
14102 +       u8 crc_acc_num;
14103 +       u8 pad2;
14104 +       u8 pk_acc_num;
14105 +       u8 kasumi_acc_num;
14106 +       u8 rng_acc_num;
14107 +       u8 pad3;
14108 +       u8 md_acc_num;
14109 +       u8 arc4_acc_num;
14110 +       u8 des_acc_num;
14111 +       u8 aes_acc_num;
14112 +       u8 ccha_acc_num;
14113 +       u8 ptha_acc_num;
14114 +};
14115 +
14116 +struct dpseci_rsp_get_sec_counters {
14117 +       __le64 dequeued_requests;
14118 +       __le64 ob_enc_requests;
14119 +       __le64 ib_dec_requests;
14120 +       __le64 ob_enc_bytes;
14121 +       __le64 ob_prot_bytes;
14122 +       __le64 ib_dec_bytes;
14123 +       __le64 ib_valid_bytes;
14124 +};
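The response structs mirror the command buffer with explicit little-endian
types; converting to the host-order struct dpseci_sec_counters is a plain
le64_to_cpu() pass (a sketch; the actual dpseci.c implementation is not part
of this hunk):

static void sec_counters_from_rsp(const struct dpseci_rsp_get_sec_counters *rsp,
				  struct dpseci_sec_counters *cnt)
{
	cnt->dequeued_requests = le64_to_cpu(rsp->dequeued_requests);
	cnt->ob_enc_requests = le64_to_cpu(rsp->ob_enc_requests);
	cnt->ib_dec_requests = le64_to_cpu(rsp->ib_dec_requests);
	cnt->ob_enc_bytes = le64_to_cpu(rsp->ob_enc_bytes);
	cnt->ob_prot_bytes = le64_to_cpu(rsp->ob_prot_bytes);
	cnt->ib_dec_bytes = le64_to_cpu(rsp->ib_dec_bytes);
	cnt->ib_valid_bytes = le64_to_cpu(rsp->ib_valid_bytes);
}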
14125 +
14126 +struct dpseci_rsp_get_api_version {
14127 +       __le16 major;
14128 +       __le16 minor;
14129 +};
14130 +
14131 +struct dpseci_cmd_opr {
14132 +       __le16 pad;
14133 +       u8 index;
14134 +       u8 options;
14135 +       u8 pad1[7];
14136 +       u8 oloe;
14137 +       u8 oeane;
14138 +       u8 olws;
14139 +       u8 oa;
14140 +       u8 oprrws;
14141 +};
14142 +
14143 +#define DPSECI_OPR_RIP_SHIFT           0
14144 +#define DPSECI_OPR_RIP_SIZE            1
14145 +#define DPSECI_OPR_ENABLE_SHIFT                1
14146 +#define DPSECI_OPR_ENABLE_SIZE         1
14147 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT     0
14148 +#define DPSECI_OPR_TSEQ_NLIS_SIZE      1
14149 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT     0
14150 +#define DPSECI_OPR_HSEQ_NLIS_SIZE      1
14151 +
14152 +struct dpseci_rsp_get_opr {
14153 +       __le64 pad;
14154 +       u8 flags;
14155 +       u8 pad0[2];
14156 +       u8 oloe;
14157 +       u8 oeane;
14158 +       u8 olws;
14159 +       u8 oa;
14160 +       u8 oprrws;
14161 +       __le16 nesn;
14162 +       __le16 pad1;
14163 +       __le16 ndsn;
14164 +       __le16 pad2;
14165 +       __le16 ea_tseq;
14166 +       u8 tseq_nlis;
14167 +       u8 pad3;
14168 +       __le16 ea_hseq;
14169 +       u8 hseq_nlis;
14170 +       u8 pad4;
14171 +       __le16 ea_hptr;
14172 +       __le16 pad5;
14173 +       __le16 ea_tptr;
14174 +       __le16 pad6;
14175 +       __le16 opr_vid;
14176 +       __le16 pad7;
14177 +       __le16 opr_id;
14178 +};
14179 +
14180 +#define DPSECI_CGN_DEST_TYPE_SHIFT     0
14181 +#define DPSECI_CGN_DEST_TYPE_SIZE      4
14182 +#define DPSECI_CGN_UNITS_SHIFT         4
14183 +#define DPSECI_CGN_UNITS_SIZE          2
14184 +
14185 +struct dpseci_cmd_congestion_notification {
14186 +       __le32 dest_id;
14187 +       __le16 notification_mode;
14188 +       u8 priority;
14189 +       u8 options;
14190 +       __le64 message_iova;
14191 +       __le64 message_ctx;
14192 +       __le32 threshold_entry;
14193 +       __le32 threshold_exit;
14194 +};
14195 +
14196 +#endif /* _DPSECI_CMD_H_ */
14197 --- a/drivers/crypto/caam/error.c
14198 +++ b/drivers/crypto/caam/error.c
14199 @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
14200  #endif /* DEBUG */
14201  EXPORT_SYMBOL(caam_dump_sg);
14202  
14203 +bool caam_little_end;
14204 +EXPORT_SYMBOL(caam_little_end);
14205 +
14206 +bool caam_imx;
14207 +EXPORT_SYMBOL(caam_imx);
14208 +
14209  static const struct {
14210         u8 value;
14211         const char *error_text;
14212 @@ -108,6 +114,54 @@ static const struct {
14213         { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
14214  };
14215  
14216 +static const struct {
14217 +       u8 value;
14218 +       const char *error_text;
14219 +} qi_error_list[] = {
14220 +       { 0x1F, "Job terminated by FQ or ICID flush" },
14221 +       { 0x20, "FD format error"},
14222 +       { 0x21, "FD command format error"},
14223 +       { 0x23, "FL format error"},
14224 +       { 0x25, "CRJD specified in FD, but not enabled in FLC"},
14225 +       { 0x30, "Max. buffer size too small"},
14226 +       { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
14227 +       { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
14228 +       { 0x33, "Size over/underflow (allocate mode)"},
14229 +       { 0x34, "Size over/underflow (reuse mode)"},
14230 +       { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
14231 +       { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
14232 +       { 0x41, "SBC frame format not supported (allocate mode)"},
14233 +       { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
14234 +       { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
14235 +       { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
14236 +       { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
14237 +       { 0x46, "Annotation length exceeds offset (reuse mode)"},
14238 +       { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
14239 +       { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
14240 +       { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
14241 +       { 0x51, "Unsupported IF reuse mode"},
14242 +       { 0x52, "Unsupported FL use mode"},
14243 +       { 0x53, "Unsupported RJD use mode"},
14244 +       { 0x54, "Unsupported inline descriptor use mode"},
14245 +       { 0xC0, "Table buffer pool 0 depletion"},
14246 +       { 0xC1, "Table buffer pool 1 depletion"},
14247 +       { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
14248 +       { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
14249 +       { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
14250 +       { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
14251 +       { 0xD0, "FLC read error"},
14252 +       { 0xD1, "FL read error"},
14253 +       { 0xD2, "FL write error"},
14254 +       { 0xD3, "OF SGT write error"},
14255 +       { 0xD4, "PTA read error"},
14256 +       { 0xD5, "PTA write error"},
14257 +       { 0xD6, "OF SGT F-bit write error"},
14258 +       { 0xD7, "ASA write error"},
14259 +       { 0xE1, "FLC[ICR]=0 ICID error"},
14260 +       { 0xE2, "FLC[ICR]=1 ICID error"},
14261 +       { 0xE4, "Source of ICID flush not trusted (BDI = 0)"},
14262 +};
14263 +
14264  static const char * const cha_id_list[] = {
14265         "",
14266         "AES",
14267 @@ -236,6 +290,27 @@ static void report_deco_status(struct de
14268                 status, error, idx_str, idx, err_str, err_err_code);
14269  }
14270  
14271 +static void report_qi_status(struct device *qidev, const u32 status,
14272 +                            const char *error)
14273 +{
14274 +       u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
14275 +       const char *err_str = "unidentified error value 0x";
14276 +       char err_err_code[3] = { 0 };
14277 +       int i;
14278 +
14279 +       for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
14280 +               if (qi_error_list[i].value == err_id)
14281 +                       break;
14282 +
14283 +       if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
14284 +               err_str = qi_error_list[i].error_text;
14285 +       else
14286 +               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
14287 +
14288 +       dev_err(qidev, "%08x: %s: %s%s\n",
14289 +               status, error, err_str, err_err_code);
14290 +}
14291 +
14292  static void report_jr_status(struct device *jrdev, const u32 status,
14293                              const char *error)
14294  {
14295 @@ -250,7 +325,7 @@ static void report_cond_code_status(stru
14296                 status, error, __func__);
14297  }
14298  
14299 -void caam_jr_strstatus(struct device *jrdev, u32 status)
14300 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
14301  {
14302         static const struct stat_src {
14303                 void (*report_ssed)(struct device *jrdev, const u32 status,
14304 @@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
14305                 { report_ccb_status, "CCB" },
14306                 { report_jump_status, "Jump" },
14307                 { report_deco_status, "DECO" },
14308 -               { NULL, "Queue Manager Interface" },
14309 +               { report_qi_status, "Queue Manager Interface" },
14310                 { report_jr_status, "Job Ring" },
14311                 { report_cond_code_status, "Condition Code" },
14312                 { NULL, NULL },
14313 @@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
14314         else
14315                 dev_err(jrdev, "%d: unknown error source\n", ssrc);
14316  }
14317 -EXPORT_SYMBOL(caam_jr_strstatus);
14318 +EXPORT_SYMBOL(caam_strstatus);
14319 --- a/drivers/crypto/caam/error.h
14320 +++ b/drivers/crypto/caam/error.h
14321 @@ -8,7 +8,11 @@
14322  #ifndef CAAM_ERROR_H
14323  #define CAAM_ERROR_H
14324  #define CAAM_ERROR_STR_MAX 302
14325 -void caam_jr_strstatus(struct device *jrdev, u32 status);
14326 +
14327 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
14328 +
14329 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
14330 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
14331  
14332  void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
14333                   int rowsize, int groupsize, struct scatterlist *sg,
14334 --- a/drivers/crypto/caam/intern.h
14335 +++ b/drivers/crypto/caam/intern.h
14336 @@ -65,10 +65,6 @@ struct caam_drv_private_jr {
14337   * Driver-private storage for a single CAAM block instance
14338   */
14339  struct caam_drv_private {
14340 -#ifdef CONFIG_CAAM_QI
14341 -       struct device *qidev;
14342 -#endif
14343 -
14344         /* Physical-presence section */
14345         struct caam_ctrl __iomem *ctrl; /* controller region */
14346         struct caam_deco __iomem *deco; /* DECO/CCB views */
14347 @@ -76,14 +72,21 @@ struct caam_drv_private {
14348         struct caam_queue_if __iomem *qi; /* QI control region */
14349         struct caam_job_ring __iomem *jr[4];    /* JobR's register space */
14350  
14351 +       struct iommu_domain *domain;
14352 +
14353         /*
14354          * Detected geometry block. Filled in from device tree if powerpc,
14355          * or from register-based version detection code
14356          */
14357         u8 total_jobrs;         /* Total Job Rings in device */
14358         u8 qi_present;          /* Nonzero if QI present in device */
14359 +#ifdef CONFIG_CAAM_QI
14360 +       u8 qi_init;             /* Nonzero if QI has been initialized */
14361 +#endif
14362 +       u8 mc_en;               /* Nonzero if MC f/w is active */
14363         int secvio_irq;         /* Security violation interrupt number */
14364         int virt_en;            /* Virtualization enabled in CAAM */
14365 +       int era;                /* CAAM Era (internal HW revision) */
14366  
14367  #define        RNG4_MAX_HANDLES 2
14368         /* RNG4 block */
14369 @@ -108,8 +111,95 @@ struct caam_drv_private {
14370  #endif
14371  };
14372  
14373 -void caam_jr_algapi_init(struct device *dev);
14374 -void caam_jr_algapi_remove(struct device *dev);
14375 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
14376 +
14377 +int caam_algapi_init(struct device *dev);
14378 +void caam_algapi_exit(void);
14379 +
14380 +#else
14381 +
14382 +static inline int caam_algapi_init(struct device *dev)
14383 +{
14384 +       return 0;
14385 +}
14386 +
14387 +static inline void caam_algapi_exit(void)
14388 +{
14389 +}
14390 +
14391 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
14392 +
14393 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
14394 +
14395 +int caam_algapi_hash_init(struct device *dev);
14396 +void caam_algapi_hash_exit(void);
14397 +
14398 +#else
14399 +
14400 +static inline int caam_algapi_hash_init(struct device *dev)
14401 +{
14402 +       return 0;
14403 +}
14404 +
14405 +static inline void caam_algapi_hash_exit(void)
14406 +{
14407 +}
14408 +
14409 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
14410 +
14411 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
14412 +
14413 +int caam_pkc_init(struct device *dev);
14414 +void caam_pkc_exit(void);
14415 +
14416 +#else
14417 +
14418 +static inline int caam_pkc_init(struct device *dev)
14419 +{
14420 +       return 0;
14421 +}
14422 +
14423 +static inline void caam_pkc_exit(void)
14424 +{
14425 +}
14426 +
14427 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
14428 +
14429 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
14430 +
14431 +int caam_rng_init(struct device *dev);
14432 +void caam_rng_exit(void);
14433 +
14434 +#else
14435 +
14436 +static inline int caam_rng_init(struct device *dev)
14437 +{
14438 +       return 0;
14439 +}
14440 +
14441 +static inline void caam_rng_exit(void)
14442 +{
14443 +}
14444 +
14445 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
14446 +
14447 +#ifdef CONFIG_CAAM_QI
14448 +
14449 +int caam_qi_algapi_init(struct device *dev);
14450 +void caam_qi_algapi_exit(void);
14451 +
14452 +#else
14453 +
14454 +static inline int caam_qi_algapi_init(struct device *dev)
14455 +{
14456 +       return 0;
14457 +}
14458 +
14459 +static inline void caam_qi_algapi_exit(void)
14460 +{
14461 +}
14462 +
14463 +#endif /* CONFIG_CAAM_QI */
14464  
14465  #ifdef CONFIG_DEBUG_FS
14466  static int caam_debugfs_u64_get(void *data, u64 *val)
14467 --- a/drivers/crypto/caam/jr.c
14468 +++ b/drivers/crypto/caam/jr.c
14469 @@ -23,6 +23,52 @@ struct jr_driver_data {
14470  
14471  static struct jr_driver_data driver_data;
14472  
14473 +static int jr_driver_probed;
14474 +
14475 +int caam_jr_driver_probed(void)
14476 +{
14477 +       return jr_driver_probed;
14478 +}
14479 +EXPORT_SYMBOL(caam_jr_driver_probed);
14480 +
14481 +static DEFINE_MUTEX(algs_lock);
14482 +static unsigned int active_devs;
14483 +
14484 +static void register_algs(struct device *dev)
14485 +{
14486 +       mutex_lock(&algs_lock);
14487 +
14488 +       if (++active_devs != 1)
14489 +               goto algs_unlock;
14490 +
14491 +       caam_algapi_init(dev);
14492 +       caam_algapi_hash_init(dev);
14493 +       caam_pkc_init(dev);
14494 +       caam_rng_init(dev);
14495 +       caam_qi_algapi_init(dev);
14496 +
14497 +algs_unlock:
14498 +       mutex_unlock(&algs_lock);
14499 +}
14500 +
14501 +static void unregister_algs(void)
14502 +{
14503 +       mutex_lock(&algs_lock);
14504 +
14505 +       if (--active_devs != 0)
14506 +               goto algs_unlock;
14507 +
14508 +       caam_qi_algapi_exit();
14509 +
14510 +       caam_rng_exit();
14511 +       caam_pkc_exit();
14512 +       caam_algapi_hash_exit();
14513 +       caam_algapi_exit();
14514 +
14515 +algs_unlock:
14516 +       mutex_unlock(&algs_lock);
14517 +}
14518 +
14519  static int caam_reset_hw_jr(struct device *dev)
14520  {
14521         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
14522 @@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
14523                 return -EBUSY;
14524         }
14525  
14526 +       /* Unregister JR-based RNG & crypto algorithms */
14527 +       unregister_algs();
14528 +
14529         /* Remove the node from Physical JobR list maintained by driver */
14530         spin_lock(&driver_data.jr_alloc_lock);
14531         list_del(&jrpriv->list_node);
14532 @@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
14533                 dev_err(jrdev, "Failed to shut down job ring\n");
14534         irq_dispose_mapping(jrpriv->irq);
14535  
14536 +       jr_driver_probed--;
14537 +
14538         return ret;
14539  }
14540  
14541 @@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
14542  EXPORT_SYMBOL(caam_jr_alloc);
14543  
14544  /**
14545 + * caam_jridx_alloc() - Allocate a specific job ring based on its index.
14546 + * @idx: index of the job ring to allocate
14547 + *
14548 + * returns:   pointer to the physical JobR dev on success, ERR_PTR(-ENODEV) otherwise.
14549 + **/
14550 +struct device *caam_jridx_alloc(int idx)
14551 +{
14552 +       struct caam_drv_private_jr *jrpriv;
14553 +       struct device *dev = ERR_PTR(-ENODEV);
14554 +
14555 +       spin_lock(&driver_data.jr_alloc_lock);
14556 +
14557 +       if (list_empty(&driver_data.jr_list))
14558 +               goto end;
14559 +
14560 +       list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
14561 +               if (jrpriv->ridx == idx) {
14562 +                       atomic_inc(&jrpriv->tfm_count);
14563 +                       dev = jrpriv->dev;
14564 +                       break;
14565 +               }
14566 +       }
14567 +
14568 +end:
14569 +       spin_unlock(&driver_data.jr_alloc_lock);
14570 +       return dev;
14571 +}
14572 +EXPORT_SYMBOL(caam_jridx_alloc);
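A usage sketch (hypothetical caller): unlike caam_jr_alloc(), which picks the
least-used ring, this selects a ring by index; it is released with the usual
caam_jr_free():

	struct device *jrdev = caam_jridx_alloc(0);

	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);
	/* ... enqueue descriptors via caam_jr_enqueue(jrdev, ...) ... */
	caam_jr_free(jrdev);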
14573 +
14574 +/**
14575   * caam_jr_free() - Free the Job Ring
14576   * @rdev     - points to the dev that identifies the Job ring to
14577   *             be released.
14578 @@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
14579  
14580         atomic_set(&jrpriv->tfm_count, 0);
14581  
14582 +       register_algs(jrdev->parent);
14583 +       jr_driver_probed++;
14584 +
14585         return 0;
14586  }
14587  
14588 --- a/drivers/crypto/caam/jr.h
14589 +++ b/drivers/crypto/caam/jr.h
14590 @@ -9,7 +9,9 @@
14591  #define JR_H
14592  
14593  /* Prototypes for backend-level services exposed to APIs */
14594 +int caam_jr_driver_probed(void);
14595  struct device *caam_jr_alloc(void);
14596 +struct device *caam_jridx_alloc(int idx);
14597  void caam_jr_free(struct device *rdev);
14598  int caam_jr_enqueue(struct device *dev, u32 *desc,
14599                     void (*cbk)(struct device *dev, u32 *desc, u32 status,
14600 --- a/drivers/crypto/caam/key_gen.c
14601 +++ b/drivers/crypto/caam/key_gen.c
14602 @@ -11,36 +11,6 @@
14603  #include "desc_constr.h"
14604  #include "key_gen.h"
14605  
14606 -/**
14607 - * split_key_len - Compute MDHA split key length for a given algorithm
14608 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14609 - *        SHA224, SHA384, SHA512.
14610 - *
14611 - * Return: MDHA split key length
14612 - */
14613 -static inline u32 split_key_len(u32 hash)
14614 -{
14615 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14616 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14617 -       u32 idx;
14618 -
14619 -       idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14620 -
14621 -       return (u32)(mdpadlen[idx] * 2);
14622 -}
14623 -
14624 -/**
14625 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14626 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14627 - *        SHA224, SHA384, SHA512.
14628 - *
14629 - * Return: MDHA split key pad length
14630 - */
14631 -static inline u32 split_key_pad_len(u32 hash)
14632 -{
14633 -       return ALIGN(split_key_len(hash), 16);
14634 -}
14635 -
14636  void split_key_done(struct device *dev, u32 *desc, u32 err,
14637                            void *context)
14638  {
14639 --- a/drivers/crypto/caam/key_gen.h
14640 +++ b/drivers/crypto/caam/key_gen.h
14641 @@ -6,6 +6,36 @@
14642   *
14643   */
14644  
14645 +/**
14646 + * split_key_len - Compute MDHA split key length for a given algorithm
14647 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14648 + *        SHA224, SHA384, SHA512.
14649 + *
14650 + * Return: MDHA split key length
14651 + */
14652 +static inline u32 split_key_len(u32 hash)
14653 +{
14654 +       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14655 +       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14656 +       u32 idx;
14657 +
14658 +       idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14659 +
14660 +       return (u32)(mdpadlen[idx] * 2);
14661 +}
14662 +
14663 +/**
14664 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14665 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14666 + *        SHA224, SHA384, SHA512.
14667 + *
14668 + * Return: MDHA split key pad length
14669 + */
14670 +static inline u32 split_key_pad_len(u32 hash)
14671 +{
14672 +       return ALIGN(split_key_len(hash), 16);
14673 +}
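Worked examples for the two helpers, using the OP_ALG_ALGSEL_* submask
indices into mdpadlen[]:

/*
 * split_key_len(OP_ALG_ALGSEL_SHA1)     = 2 * 20 = 40
 * split_key_pad_len(OP_ALG_ALGSEL_SHA1) = ALIGN(40, 16) = 48
 * split_key_len(OP_ALG_ALGSEL_SHA256)   = 2 * 32 = 64 (already 16B-aligned)
 */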
14674 +
14675  struct split_key_result {
14676         struct completion completion;
14677         int err;
14678 --- a/drivers/crypto/caam/qi.c
14679 +++ b/drivers/crypto/caam/qi.c
14680 @@ -9,7 +9,7 @@
14681  
14682  #include <linux/cpumask.h>
14683  #include <linux/kthread.h>
14684 -#include <soc/fsl/qman.h>
14685 +#include <linux/fsl_qman.h>
14686  
14687  #include "regs.h"
14688  #include "qi.h"
14689 @@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
14690  /*
14691   * caam_qi_priv - CAAM QI backend private params
14692   * @cgr: QMan congestion group
14693 - * @qi_pdev: platform device for QI backend
14694   */
14695  struct caam_qi_priv {
14696         struct qman_cgr cgr;
14697 -       struct platform_device *qi_pdev;
14698  };
14699  
14700  static struct caam_qi_priv qipriv ____cacheline_aligned;
14701 @@ -102,26 +100,34 @@ static int mod_init_cpu;
14702   */
14703  static struct kmem_cache *qi_cache;
14704  
14705 +static void *caam_iova_to_virt(struct iommu_domain *domain,
14706 +                              dma_addr_t iova_addr)
14707 +{
14708 +       phys_addr_t phys_addr;
14709 +
14710 +       phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
14711 +
14712 +       return phys_to_virt(phys_addr);
14713 +}
14714 +
14715  int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
14716  {
14717         struct qm_fd fd;
14718 -       dma_addr_t addr;
14719         int ret;
14720         int num_retries = 0;
14721  
14722 -       qm_fd_clear_fd(&fd);
14723 -       qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
14724 -
14725 -       addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14726 +       fd.cmd = 0;
14727 +       fd.format = qm_fd_compound;
14728 +       fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
14729 +       fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14730                               DMA_BIDIRECTIONAL);
14731 -       if (dma_mapping_error(qidev, addr)) {
14732 +       if (dma_mapping_error(qidev, fd.addr)) {
14733                 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
14734                 return -EIO;
14735         }
14736 -       qm_fd_addr_set64(&fd, addr);
14737  
14738         do {
14739 -               ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
14740 +               ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
14741                 if (likely(!ret))
14742                         return 0;
14743  
14744 @@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
14745  EXPORT_SYMBOL(caam_qi_enqueue);
14746  
14747  static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
14748 -                          const union qm_mr_entry *msg)
14749 +                          const struct qm_mr_entry *msg)
14750  {
14751         const struct qm_fd *fd;
14752         struct caam_drv_req *drv_req;
14753         struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14754 +       struct caam_drv_private *priv = dev_get_drvdata(qidev);
14755  
14756         fd = &msg->ern.fd;
14757  
14758 -       if (qm_fd_get_format(fd) != qm_fd_compound) {
14759 +       if (fd->format != qm_fd_compound) {
14760                 dev_err(qidev, "Non-compound FD from CAAM\n");
14761                 return;
14762         }
14763  
14764 -       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14765 +       drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14766         if (!drv_req) {
14767                 dev_err(qidev,
14768                         "Can't find original request for CAAM response\n");
14769 @@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
14770         req_fq->cb.fqs = NULL;
14771  
14772         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
14773 -                               QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
14774 +                               QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
14775 +                            req_fq);
14776         if (ret) {
14777                 dev_err(qidev, "Failed to create session req FQ\n");
14778                 goto create_req_fq_fail;
14779         }
14780  
14781 -       memset(&opts, 0, sizeof(opts));
14782 -       opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14783 -                                  QM_INITFQ_WE_CONTEXTB |
14784 -                                  QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14785 -       opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14786 -       qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
14787 -       opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
14788 -       qm_fqd_context_a_set64(&opts.fqd, hwdesc);
14789 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14790 +                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14791 +                      QM_INITFQ_WE_CGID;
14792 +       opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
14793 +       opts.fqd.dest.channel = qm_channel_caam;
14794 +       opts.fqd.dest.wq = 2;
14795 +       opts.fqd.context_b = qman_fq_fqid(rsp_fq);
14796 +       opts.fqd.context_a.hi = upper_32_bits(hwdesc);
14797 +       opts.fqd.context_a.lo = lower_32_bits(hwdesc);
14798         opts.fqd.cgid = qipriv.cgr.cgrid;
14799  
14800         ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
14801 @@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
14802         return req_fq;
14803  
14804  init_req_fq_fail:
14805 -       qman_destroy_fq(req_fq);
14806 +       qman_destroy_fq(req_fq, 0);
14807  create_req_fq_fail:
14808         kfree(req_fq);
14809         return ERR_PTR(ret);
14810 @@ -275,7 +284,7 @@ empty_fq:
14811         if (ret)
14812                 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
14813  
14814 -       qman_destroy_fq(fq);
14815 +       qman_destroy_fq(fq, 0);
14816         kfree(fq);
14817  
14818         return ret;
14819 @@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
14820                 if (ret)
14821                         return ret;
14822  
14823 -               if (!qm_mcr_np_get(&np, frm_cnt))
14824 +               if (!np.frm_cnt)
14825                         break;
14826  
14827                 msleep(20);
14828 @@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
14829  int caam_qi_shutdown(struct device *qidev)
14830  {
14831         int i, ret;
14832 -       struct caam_qi_priv *priv = dev_get_drvdata(qidev);
14833 +       struct caam_qi_priv *priv = &qipriv;
14834         const cpumask_t *cpus = qman_affine_cpus();
14835         struct cpumask old_cpumask = current->cpus_allowed;
14836  
14837 @@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
14838         /* Now that we're done with the CGRs, restore the cpus allowed mask */
14839         set_cpus_allowed_ptr(current, &old_cpumask);
14840  
14841 -       platform_device_unregister(priv->qi_pdev);
14842         return ret;
14843  }
14844  
14845 @@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
14846         struct caam_drv_req *drv_req;
14847         const struct qm_fd *fd;
14848         struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14849 -       u32 status;
14850 +       struct caam_drv_private *priv = dev_get_drvdata(qidev);
14851  
14852         if (caam_qi_napi_schedule(p, caam_napi))
14853                 return qman_cb_dqrr_stop;
14854  
14855         fd = &dqrr->fd;
14856 -       status = be32_to_cpu(fd->status);
14857 -       if (unlikely(status))
14858 -               dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
14859 +       if (unlikely(fd->status)) {
14860 +               u32 ssrc = fd->status & JRSTA_SSRC_MASK;
14861 +               u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
14862  
14863 -       if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
14864 +               if (ssrc != JRSTA_SSRC_CCB_ERROR ||
14865 +                   err_id != JRSTA_CCBERR_ERRID_ICVCHK)
14866 +                       dev_err(qidev, "Error: %#x in CAAM response FD\n",
14867 +                               fd->status);
14868 +       }
14869 +
14870 +       if (unlikely(fd->format != qm_fd_compound)) {
14871                 dev_err(qidev, "Non-compound FD from CAAM\n");
14872                 return qman_cb_dqrr_consume;
14873         }
14874  
14875 -       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14876 +       drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14877         if (unlikely(!drv_req)) {
14878                 dev_err(qidev,
14879                         "Can't find original request for caam response\n");
14880 @@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
14881         dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
14882                          sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
14883  
14884 -       drv_req->cbk(drv_req, status);
14885 +       drv_req->cbk(drv_req, fd->status);
14886         return qman_cb_dqrr_consume;
14887  }
14888  
14889 @@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
14890                 return -ENODEV;
14891         }
14892  
14893 -       memset(&opts, 0, sizeof(opts));
14894 -       opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14895 -                                  QM_INITFQ_WE_CONTEXTB |
14896 -                                  QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14897 -       opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
14898 -                                      QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14899 -       qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
14900 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14901 +               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14902 +               QM_INITFQ_WE_CGID;
14903 +       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
14904 +                          QM_FQCTRL_CGE;
14905 +       opts.fqd.dest.channel = qman_affine_channel(cpu);
14906 +       opts.fqd.dest.wq = 3;
14907         opts.fqd.cgid = qipriv.cgr.cgrid;
14908         opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
14909                                                 QM_STASHING_EXCL_DATA;
14910 -       qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
14911 +       opts.fqd.context_a.stashing.data_cl = 1;
14912 +       opts.fqd.context_a.stashing.context_cl = 1;
14913  
14914         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
14915         if (ret) {
14916 @@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
14917  {
14918         int ret;
14919         struct qm_mcc_initcgr opts;
14920 -       const u64 cpus = *(u64 *)qman_affine_cpus();
14921 -       const int num_cpus = hweight64(cpus);
14922 -       const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
14923 +       const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
14924 +                       MAX_RSP_FQ_BACKLOG_PER_CPU;
14925  
14926         ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
14927         if (ret) {
14928 @@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
14929  
14930         qipriv.cgr.cb = cgr_cb;
14931         memset(&opts, 0, sizeof(opts));
14932 -       opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
14933 -                                  QM_CGR_WE_MODE);
14934 +       opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
14935         opts.cgr.cscn_en = QM_CGR_EN;
14936         opts.cgr.mode = QMAN_CGR_MODE_FRAME;
14937         qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
14938 @@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
14939  int caam_qi_init(struct platform_device *caam_pdev)
14940  {
14941         int err, i;
14942 -       struct platform_device *qi_pdev;
14943         struct device *ctrldev = &caam_pdev->dev, *qidev;
14944         struct caam_drv_private *ctrlpriv;
14945         const cpumask_t *cpus = qman_affine_cpus();
14946         struct cpumask old_cpumask = current->cpus_allowed;
14947 -       static struct platform_device_info qi_pdev_info = {
14948 -               .name = "caam_qi",
14949 -               .id = PLATFORM_DEVID_NONE
14950 -       };
14951  
14952         /*
14953          * QMAN requires CGRs to be removed from same CPU+portal from where it
14954 @@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
14955         mod_init_cpu = cpumask_first(cpus);
14956         set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
14957  
14958 -       qi_pdev_info.parent = ctrldev;
14959 -       qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
14960 -       qi_pdev = platform_device_register_full(&qi_pdev_info);
14961 -       if (IS_ERR(qi_pdev))
14962 -               return PTR_ERR(qi_pdev);
14963 -       set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
14964 -
14965         ctrlpriv = dev_get_drvdata(ctrldev);
14966 -       qidev = &qi_pdev->dev;
14967 -
14968 -       qipriv.qi_pdev = qi_pdev;
14969 -       dev_set_drvdata(qidev, &qipriv);
14970 +       qidev = ctrldev;
14971  
14972         /* Initialize the congestion detection */
14973         err = init_cgr(qidev);
14974         if (err) {
14975                 dev_err(qidev, "CGR initialization failed: %d\n", err);
14976 -               platform_device_unregister(qi_pdev);
14977                 return err;
14978         }
14979  
14980 @@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
14981         if (err) {
14982                 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
14983                 free_rsp_fqs();
14984 -               platform_device_unregister(qi_pdev);
14985                 return err;
14986         }
14987  
14988 @@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
14989                 napi_enable(irqtask);
14990         }
14991  
14992 -       /* Hook up QI device to parent controlling caam device */
14993 -       ctrlpriv->qidev = qidev;
14994 -
14995         qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
14996                                      SLAB_CACHE_DMA, NULL);
14997         if (!qi_cache) {
14998                 dev_err(qidev, "Can't allocate CAAM cache\n");
14999                 free_rsp_fqs();
15000 -               platform_device_unregister(qi_pdev);
15001                 return -ENOMEM;
15002         }
15003  
15004 @@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
15005         debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
15006                             &times_congested, &caam_fops_u64_ro);
15007  #endif
15008 +
15009 +       ctrlpriv->qi_init = 1;
15010         dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
15011         return 0;
15012  }
15013 --- a/drivers/crypto/caam/qi.h
15014 +++ b/drivers/crypto/caam/qi.h
15015 @@ -9,7 +9,7 @@
15016  #ifndef __QI_H__
15017  #define __QI_H__
15018  
15019 -#include <soc/fsl/qman.h>
15020 +#include <linux/fsl_qman.h>
15021  #include "compat.h"
15022  #include "desc.h"
15023  #include "desc_constr.h"
15024 --- a/drivers/crypto/caam/regs.h
15025 +++ b/drivers/crypto/caam/regs.h
15026 @@ -3,6 +3,7 @@
15027   * CAAM hardware register-level view
15028   *
15029   * Copyright 2008-2011 Freescale Semiconductor, Inc.
15030 + * Copyright 2018 NXP
15031   */
15032  
15033  #ifndef REGS_H
15034 @@ -211,6 +212,47 @@ struct jr_outentry {
15035         u32 jrstatus;   /* Status for completed descriptor */
15036  } __packed;
15037  
15038 +/* Version registers (Era 10+) e80-eff */
15039 +struct version_regs {
15040 +       u32 crca;       /* CRCA_VERSION */
15041 +       u32 afha;       /* AFHA_VERSION */
15042 +       u32 kfha;       /* KFHA_VERSION */
15043 +       u32 pkha;       /* PKHA_VERSION */
15044 +       u32 aesa;       /* AESA_VERSION */
15045 +       u32 mdha;       /* MDHA_VERSION */
15046 +       u32 desa;       /* DESA_VERSION */
15047 +       u32 snw8a;      /* SNW8A_VERSION */
15048 +       u32 snw9a;      /* SNW9A_VERSION */
15049 +       u32 zuce;       /* ZUCE_VERSION */
15050 +       u32 zuca;       /* ZUCA_VERSION */
15051 +       u32 ccha;       /* CCHA_VERSION */
15052 +       u32 ptha;       /* PTHA_VERSION */
15053 +       u32 rng;        /* RNG_VERSION */
15054 +       u32 trng;       /* TRNG_VERSION */
15055 +       u32 aaha;       /* AAHA_VERSION */
15056 +       u32 rsvd[10];
15057 +       u32 sr;         /* SR_VERSION */
15058 +       u32 dma;        /* DMA_VERSION */
15059 +       u32 ai;         /* AI_VERSION */
15060 +       u32 qi;         /* QI_VERSION */
15061 +       u32 jr;         /* JR_VERSION */
15062 +       u32 deco;       /* DECO_VERSION */
15063 +};
15064 +
15065 +/* Version registers bitfields */
15066 +
15067 +/* Number of CHAs instantiated */
15068 +#define CHA_VER_NUM_MASK       0xffull
15069 +/* CHA Miscellaneous Information */
15070 +#define CHA_VER_MISC_SHIFT     8
15071 +#define CHA_VER_MISC_MASK      (0xffull << CHA_VER_MISC_SHIFT)
15072 +/* CHA Revision Number */
15073 +#define CHA_VER_REV_SHIFT      16
15074 +#define CHA_VER_REV_MASK       (0xffull << CHA_VER_REV_SHIFT)
15075 +/* CHA Version ID */
15076 +#define CHA_VER_VID_SHIFT      24
15077 +#define CHA_VER_VID_MASK       (0xffull << CHA_VER_VID_SHIFT)
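On Era 10+ parts these registers replace the perfmon cha_id fields; a field
is pulled out with a plain shift-and-mask (sketch, assuming an ioremapped
ctrl region):

static u32 aesa_version_id(struct caam_ctrl __iomem *ctrl)
{
	return (rd_reg32(&ctrl->vreg.aesa) & CHA_VER_VID_MASK) >>
	       CHA_VER_VID_SHIFT;
}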
15078 +
15079  /*
15080   * caam_perfmon - Performance Monitor/Secure Memory Status/
15081   *                CAAM Global Status/Component Version IDs
15082 @@ -223,15 +265,13 @@ struct jr_outentry {
15083  #define CHA_NUM_MS_DECONUM_MASK        (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
15084  
15085  /*
15086 - * CHA version IDs / instantiation bitfields
15087 + * CHA version IDs / instantiation bitfields (< Era 10)
15088   * Defined for use with the cha_id fields in perfmon, but the same shift/mask
15089   * selectors can be used to pull out the number of instantiated blocks within
15090   * cha_num fields in perfmon because the locations are the same.
15091   */
15092  #define CHA_ID_LS_AES_SHIFT    0
15093  #define CHA_ID_LS_AES_MASK     (0xfull << CHA_ID_LS_AES_SHIFT)
15094 -#define CHA_ID_LS_AES_LP       (0x3ull << CHA_ID_LS_AES_SHIFT)
15095 -#define CHA_ID_LS_AES_HP       (0x4ull << CHA_ID_LS_AES_SHIFT)
15096  
15097  #define CHA_ID_LS_DES_SHIFT    4
15098  #define CHA_ID_LS_DES_MASK     (0xfull << CHA_ID_LS_DES_SHIFT)
15099 @@ -241,9 +281,6 @@ struct jr_outentry {
15100  
15101  #define CHA_ID_LS_MD_SHIFT     12
15102  #define CHA_ID_LS_MD_MASK      (0xfull << CHA_ID_LS_MD_SHIFT)
15103 -#define CHA_ID_LS_MD_LP256     (0x0ull << CHA_ID_LS_MD_SHIFT)
15104 -#define CHA_ID_LS_MD_LP512     (0x1ull << CHA_ID_LS_MD_SHIFT)
15105 -#define CHA_ID_LS_MD_HP                (0x2ull << CHA_ID_LS_MD_SHIFT)
15106  
15107  #define CHA_ID_LS_RNG_SHIFT    16
15108  #define CHA_ID_LS_RNG_MASK     (0xfull << CHA_ID_LS_RNG_SHIFT)
15109 @@ -269,6 +306,13 @@ struct jr_outentry {
15110  #define CHA_ID_MS_JR_SHIFT     28
15111  #define CHA_ID_MS_JR_MASK      (0xfull << CHA_ID_MS_JR_SHIFT)
15112  
15113 +/* Specific CHA version IDs */
15114 +#define CHA_VER_VID_AES_LP     0x3ull
15115 +#define CHA_VER_VID_AES_HP     0x4ull
15116 +#define CHA_VER_VID_MD_LP256   0x0ull
15117 +#define CHA_VER_VID_MD_LP512   0x1ull
15118 +#define CHA_VER_VID_MD_HP      0x2ull
15119 +
15120  struct sec_vid {
15121         u16 ip_id;
15122         u8 maj_rev;
15123 @@ -473,8 +517,10 @@ struct caam_ctrl {
15124                 struct rng4tst r4tst[2];
15125         };
15126  
15127 -       u32 rsvd9[448];
15128 +       u32 rsvd9[416];
15129  
15130 +       /* Version registers - introduced with era 10           e80-eff */
15131 +       struct version_regs vreg;
15132         /* Performance Monitor                                  f00-fff */
15133         struct caam_perfmon perfmon;
15134  };
15135 @@ -564,8 +610,10 @@ struct caam_job_ring {
15136         u32 rsvd11;
15137         u32 jrcommand;  /* JRCRx - JobR command */
15138  
15139 -       u32 rsvd12[932];
15140 +       u32 rsvd12[900];
15141  
15142 +       /* Version registers - introduced with era 10           e80-eff */
15143 +       struct version_regs vreg;
15144         /* Performance Monitor                                  f00-fff */
15145         struct caam_perfmon perfmon;
15146  };
15147 @@ -627,6 +675,8 @@ struct caam_job_ring {
15148  #define JRSTA_DECOERR_INVSIGN       0x86
15149  #define JRSTA_DECOERR_DSASIGN       0x87
15150  
15151 +#define JRSTA_QIERR_ERROR_MASK      0x00ff
15152 +
15153  #define JRSTA_CCBERR_JUMP           0x08000000
15154  #define JRSTA_CCBERR_INDEX_MASK     0xff00
15155  #define JRSTA_CCBERR_INDEX_SHIFT    8
15156 @@ -870,13 +920,19 @@ struct caam_deco {
15157         u32 rsvd29[48];
15158         u32 descbuf[64];        /* DxDESB - Descriptor buffer */
15159         u32 rscvd30[193];
15160 -#define DESC_DBG_DECO_STAT_HOST_ERR    0x00D00000
15161  #define DESC_DBG_DECO_STAT_VALID       0x80000000
15162  #define DESC_DBG_DECO_STAT_MASK                0x00F00000
15163 +#define DESC_DBG_DECO_STAT_SHIFT       20
15164         u32 desc_dbg;           /* DxDDR - DECO Debug Register */
15165 -       u32 rsvd31[126];
15166 +       u32 rsvd31[13];
15167 +#define DESC_DER_DECO_STAT_MASK                0x000F0000
15168 +#define DESC_DER_DECO_STAT_SHIFT       16
15169 +       u32 dbg_exec;           /* DxDER - DECO Debug Exec Register */
15170 +       u32 rsvd32[112];
15171  };
15172  
15173 +#define DECO_STAT_HOST_ERR     0xD
15174 +
15175  #define DECO_JQCR_WHL          0x20000000
15176  #define DECO_JQCR_FOUR         0x10000000
15177  
15178 --- a/drivers/crypto/caam/sg_sw_qm.h
15179 +++ b/drivers/crypto/caam/sg_sw_qm.h
15180 @@ -34,46 +34,61 @@
15181  #ifndef __SG_SW_QM_H
15182  #define __SG_SW_QM_H
15183  
15184 -#include <soc/fsl/qman.h>
15185 +#include <linux/fsl_qman.h>
15186  #include "regs.h"
15187  
15188 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
15189 +{
15190 +       dma_addr_t addr = qm_sg_ptr->opaque;
15191 +
15192 +       qm_sg_ptr->opaque = cpu_to_caam64(addr);
15193 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15194 +}
15195 +
15196  static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
15197 -                                 u16 offset)
15198 +                                 u32 len, u16 offset)
15199  {
15200 -       qm_sg_entry_set64(qm_sg_ptr, dma);
15201 +       qm_sg_ptr->addr = dma;
15202 +       qm_sg_ptr->length = len;
15203         qm_sg_ptr->__reserved2 = 0;
15204         qm_sg_ptr->bpid = 0;
15205 -       qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
15206 +       qm_sg_ptr->__reserved3 = 0;
15207 +       qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
15208 +
15209 +       cpu_to_hw_sg(qm_sg_ptr);
15210  }
15211  
15212  static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
15213                                     dma_addr_t dma, u32 len, u16 offset)
15214  {
15215 -       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15216 -       qm_sg_entry_set_len(qm_sg_ptr, len);
15217 +       qm_sg_ptr->extension = 0;
15218 +       qm_sg_ptr->final = 0;
15219 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15220  }
15221  
15222  static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
15223                                          dma_addr_t dma, u32 len, u16 offset)
15224  {
15225 -       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15226 -       qm_sg_entry_set_f(qm_sg_ptr, len);
15227 +       qm_sg_ptr->extension = 0;
15228 +       qm_sg_ptr->final = 1;
15229 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15230  }
15231  
15232  static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
15233                                         dma_addr_t dma, u32 len, u16 offset)
15234  {
15235 -       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15236 -       qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
15237 +       qm_sg_ptr->extension = 1;
15238 +       qm_sg_ptr->final = 0;
15239 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15240  }
15241  
15242  static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
15243                                              dma_addr_t dma, u32 len,
15244                                              u16 offset)
15245  {
15246 -       __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15247 -       qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
15248 -                                    (len & QM_SG_LEN_MASK));
15249 +       qm_sg_ptr->extension = 1;
15250 +       qm_sg_ptr->final = 1;
15251 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15252  }
15253  
15254  /*
15255 @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
15256                                     struct qm_sg_entry *qm_sg_ptr, u16 offset)
15257  {
15258         qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
15259 -       qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
15260 +
15261 +       qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
15262 +       qm_sg_ptr->final = 1;
15263 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15264  }
15265  
15266  #endif /* __SG_SW_QM_H */
15267 --- a/drivers/crypto/talitos.c
15268 +++ b/drivers/crypto/talitos.c
15269 @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
15270         ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
15271                                  sg_count, areq->assoclen, tbl_off, elen);
15272  
15273 +       /*
15274 +        * In case of SEC 2.x+, the cipher-in length must include only the
15275 +        * ciphertext, while the extent field is used for the ICV length.
15276 +        */
15277 +       if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
15278 +           (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
15279 +               desc->ptr[4].len = cpu_to_be16(cryptlen);
15280 +
15281         if (ret > 1) {
15282                 tbl_off += ret;
15283                 sync_needed = true;
15284 --- a/include/crypto/chacha20.h
15285 +++ b/include/crypto/chacha20.h
15286 @@ -13,6 +13,7 @@
15287  #define CHACHA20_IV_SIZE       16
15288  #define CHACHA20_KEY_SIZE      32
15289  #define CHACHA20_BLOCK_SIZE    64
15290 +#define CHACHAPOLY_IV_SIZE     12
15291  
15292  struct chacha20_ctx {
15293         u32 key[8];