1 From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 15:02:01 +0800
4 Subject: [PATCH] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for Layerscape SEC support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32  crypto/Kconfig                                    |   30 +
33  crypto/Makefile                                   |    4 +
34  crypto/acompress.c                                |  169 +
35  crypto/algboss.c                                  |   12 +-
36  crypto/crypto_user.c                              |   19 +
37  crypto/scompress.c                                |  356 ++
38  crypto/tcrypt.c                                   |   17 +-
39  crypto/testmgr.c                                  | 1701 ++++----
40  crypto/testmgr.h                                  | 1125 +++---
41  crypto/tls.c                                      |  607 +++
42  drivers/crypto/caam/Kconfig                       |   72 +-
43  drivers/crypto/caam/Makefile                      |   15 +-
44  drivers/crypto/caam/caamalg.c                     | 2125 +++-------
45  drivers/crypto/caam/caamalg_desc.c                | 1913 +++++++++
46  drivers/crypto/caam/caamalg_desc.h                |  127 +
47  drivers/crypto/caam/caamalg_qi.c                  | 2877 +++++++++++++
48  drivers/crypto/caam/caamalg_qi2.c                 | 4428 +++++++++++++++++++++
49  drivers/crypto/caam/caamalg_qi2.h                 |  265 ++
50  drivers/crypto/caam/caamhash.c                    |  521 +--
51  drivers/crypto/caam/caampkc.c                     |  471 ++-
52  drivers/crypto/caam/caampkc.h                     |   58 +
53  drivers/crypto/caam/caamrng.c                     |   16 +-
54  drivers/crypto/caam/compat.h                      |    1 +
55  drivers/crypto/caam/ctrl.c                        |  356 +-
56  drivers/crypto/caam/ctrl.h                        |    2 +
57  drivers/crypto/caam/desc.h                        |   55 +-
58  drivers/crypto/caam/desc_constr.h                 |  139 +-
59  drivers/crypto/caam/dpseci.c                      |  859 ++++
60  drivers/crypto/caam/dpseci.h                      |  395 ++
61  drivers/crypto/caam/dpseci_cmd.h                  |  261 ++
62  drivers/crypto/caam/error.c                       |  127 +-
63  drivers/crypto/caam/error.h                       |   10 +-
64  drivers/crypto/caam/intern.h                      |   31 +-
65  drivers/crypto/caam/jr.c                          |   97 +-
66  drivers/crypto/caam/jr.h                          |    2 +
67  drivers/crypto/caam/key_gen.c                     |   32 +-
68  drivers/crypto/caam/key_gen.h                     |   36 +-
69  drivers/crypto/caam/pdb.h                         |   62 +
70  drivers/crypto/caam/pkc_desc.c                    |   36 +
71  drivers/crypto/caam/qi.c                          |  797 ++++
72  drivers/crypto/caam/qi.h                          |  204 +
73  drivers/crypto/caam/regs.h                        |   63 +-
74  drivers/crypto/caam/sg_sw_qm.h                    |  126 +
75  drivers/crypto/caam/sg_sw_qm2.h                   |   81 +
76  drivers/crypto/caam/sg_sw_sec4.h                  |   60 +-
77  drivers/net/wireless/rsi/rsi_91x_usb.c            |    2 +-
78  drivers/staging/wilc1000/linux_wlan.c             |    2 +-
79  drivers/staging/wilc1000/wilc_wfi_cfgoperations.c |    2 +-
80  include/crypto/acompress.h                        |  269 ++
81  include/crypto/internal/acompress.h               |   81 +
82  include/crypto/internal/scompress.h               |  136 +
83  include/linux/crypto.h                            |    3 +
84  include/uapi/linux/cryptouser.h                   |    5 +
85  scripts/spelling.txt                              |    3 +
86  sound/soc/amd/acp-pcm-dma.c                       |    2 +-
87  55 files changed, 17310 insertions(+), 3955 deletions(-)
88  create mode 100644 crypto/acompress.c
89  create mode 100644 crypto/scompress.c
90  create mode 100644 crypto/tls.c
91  create mode 100644 drivers/crypto/caam/caamalg_desc.c
92  create mode 100644 drivers/crypto/caam/caamalg_desc.h
93  create mode 100644 drivers/crypto/caam/caamalg_qi.c
94  create mode 100644 drivers/crypto/caam/caamalg_qi2.c
95  create mode 100644 drivers/crypto/caam/caamalg_qi2.h
96  create mode 100644 drivers/crypto/caam/dpseci.c
97  create mode 100644 drivers/crypto/caam/dpseci.h
98  create mode 100644 drivers/crypto/caam/dpseci_cmd.h
99  create mode 100644 drivers/crypto/caam/qi.c
100  create mode 100644 drivers/crypto/caam/qi.h
101  create mode 100644 drivers/crypto/caam/sg_sw_qm.h
102  create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
103  create mode 100644 include/crypto/acompress.h
104  create mode 100644 include/crypto/internal/acompress.h
105  create mode 100644 include/crypto/internal/scompress.h
106
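For orientation, the consumer side of the new acompress interface added below looks roughly like the following sketch. This is illustrative only, not part of the patch: the algorithm name "lzo" and the demo_* identifiers are placeholders (an acomp or scomp provider registered under that name is assumed), and the completion-based wait mirrors what testmgr does further down.

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct acomp_wait {
        struct completion done;
        int err;
};

static void acomp_wait_cb(struct crypto_async_request *areq, int err)
{
        struct acomp_wait *w = areq->data;

        if (err == -EINPROGRESS)
                return;
        w->err = err;
        complete(&w->done);
}

/* Compress src (slen bytes) into dst (dlen bytes available). */
static int demo_compress(struct scatterlist *src, unsigned int slen,
                         struct scatterlist *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct acomp_wait wait;
        int ret;

        init_completion(&wait.done);

        tfm = crypto_alloc_acomp("lzo", 0, 0);  /* placeholder name */
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        acomp_request_set_params(req, src, dst, slen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   acomp_wait_cb, &wait);

        ret = crypto_acomp_compress(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&wait.done);
                ret = wait.err;
        }
        /* on success, req->dlen holds the produced output length */

        acomp_request_free(req);
out_tfm:
        crypto_free_acomp(tfm);
        return ret;
}
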
107 diff --git a/crypto/Kconfig b/crypto/Kconfig
108 index 17be110a..00e145e2 100644
109 --- a/crypto/Kconfig
110 +++ b/crypto/Kconfig
111 @@ -102,6 +102,15 @@ config CRYPTO_KPP
112         select CRYPTO_ALGAPI
113         select CRYPTO_KPP2
114  
115 +config CRYPTO_ACOMP2
116 +       tristate
117 +       select CRYPTO_ALGAPI2
118 +
119 +config CRYPTO_ACOMP
120 +       tristate
121 +       select CRYPTO_ALGAPI
122 +       select CRYPTO_ACOMP2
123 +
124  config CRYPTO_RSA
125         tristate "RSA algorithm"
126         select CRYPTO_AKCIPHER
127 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
128         select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
129         select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
130         select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
131 +       select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
132  
133  config CRYPTO_USER
134         tristate "Userspace cryptographic algorithm configuration"
135 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
136           a sequence number xored with a salt.  This is the default
137           algorithm for CBC.
138  
139 +config CRYPTO_TLS
140 +       tristate "TLS support"
141 +       select CRYPTO_AEAD
142 +       select CRYPTO_BLKCIPHER
143 +       select CRYPTO_MANAGER
144 +       select CRYPTO_HASH
145 +       select CRYPTO_NULL
146 +       select CRYPTO_AUTHENC
147 +       help
148 +         Support for TLS 1.0 record encryption and decryption.
149 +
150 +         This module adds support for encryption/decryption of TLS 1.0 frames
151 +         using blockcipher algorithms. The name of the resulting algorithm is
152 +         "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
153 +         algorithms are used (e.g. aes-generic, sha1-generic), but hardware
154 +         accelerated versions will be used automatically if available.
155 +
156 +         User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
157 +         operations through AF_ALG or cryptodev interfaces
158 +         operations through the AF_ALG or cryptodev interfaces.
159  comment "Block modes"
160  
161  config CRYPTO_CBC
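
To make the AF_ALG route mentioned in the help text concrete: a userspace consumer binds an "aead" socket to the tls10 instance and hands it an authenc-formatted key blob. The sketch below is illustrative, not part of the patch; the key-blob layout mirrors what crypto_authenc_extractkeys() expects (include/crypto/authenc.h is not a uapi header, so its constant is restated locally), the keys are zeroed placeholders, and error handling plus the per-record sendmsg()/read() loop are elided.

#include <linux/if_alg.h>
#include <linux/rtnetlink.h>            /* struct rtattr, RTA_*() */
#include <sys/socket.h>
#include <arpa/inet.h>                  /* htonl() */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif
#define CRYPTO_AUTHENC_KEYA_PARAM 1     /* from include/crypto/authenc.h */

/* Build the blob setkey expects: rtattr{be32 enckeylen} || authkey || enckey */
static int pack_authenc_key(unsigned char *buf,
                            const unsigned char *authkey, unsigned int alen,
                            const unsigned char *enckey, unsigned int elen)
{
        struct rtattr *rta = (struct rtattr *)buf;

        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(uint32_t));
        *(uint32_t *)RTA_DATA(rta) = htonl(elen);
        memcpy(buf + RTA_SPACE(sizeof(uint32_t)), authkey, alen);
        memcpy(buf + RTA_SPACE(sizeof(uint32_t)) + alen, enckey, elen);
        return RTA_SPACE(sizeof(uint32_t)) + alen + elen;
}

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "aead",
                .salg_name   = "tls10(hmac(sha1),cbc(aes))",
        };
        unsigned char authkey[20] = { 0 }, enckey[16] = { 0 };
        unsigned char keybuf[64];
        int klen, tfmfd, opfd;

        klen = pack_authenc_key(keybuf, authkey, sizeof(authkey),
                                enckey, sizeof(enckey));

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, keybuf, klen);
        /* authsize (HMAC-SHA1 digest size) is carried in optlen */
        setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20);

        opfd = accept(tfmfd, NULL, 0);
        /*
         * Per record: sendmsg() with ALG_SET_OP, ALG_SET_IV and
         * ALG_SET_AEAD_ASSOCLEN cmsgs plus assoc||payload in the iovec,
         * then read() back the payload, HMAC and CBC padding.
         */
        close(opfd);
        close(tfmfd);
        return 0;
}
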
162 diff --git a/crypto/Makefile b/crypto/Makefile
163 index 9e52b3c5..936d2b73 100644
164 --- a/crypto/Makefile
165 +++ b/crypto/Makefile
166 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
167  rsa_generic-y += rsa-pkcs1pad.o
168  obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
169  
170 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
171 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
172 +
173  cryptomgr-y := algboss.o testmgr.o
174  
175  obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
176 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
177  obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
178  obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
179  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
180 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
181  obj-$(CONFIG_CRYPTO_LZO) += lzo.o
182  obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
183  obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
184 diff --git a/crypto/acompress.c b/crypto/acompress.c
185 new file mode 100644
186 index 00000000..887783d8
187 --- /dev/null
188 +++ b/crypto/acompress.c
189 @@ -0,0 +1,169 @@
190 +/*
191 + * Asynchronous Compression operations
192 + *
193 + * Copyright (c) 2016, Intel Corporation
194 + * Authors: Weigang Li <weigang.li@intel.com>
195 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
196 + *
197 + * This program is free software; you can redistribute it and/or modify it
198 + * under the terms of the GNU General Public License as published by the Free
199 + * Software Foundation; either version 2 of the License, or (at your option)
200 + * any later version.
201 + *
202 + */
203 +#include <linux/errno.h>
204 +#include <linux/kernel.h>
205 +#include <linux/module.h>
206 +#include <linux/seq_file.h>
207 +#include <linux/slab.h>
208 +#include <linux/string.h>
209 +#include <linux/crypto.h>
210 +#include <crypto/algapi.h>
211 +#include <linux/cryptouser.h>
212 +#include <net/netlink.h>
213 +#include <crypto/internal/acompress.h>
214 +#include <crypto/internal/scompress.h>
215 +#include "internal.h"
216 +
217 +static const struct crypto_type crypto_acomp_type;
218 +
219 +#ifdef CONFIG_NET
220 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
221 +{
222 +       struct crypto_report_acomp racomp;
223 +
224 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
225 +
226 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
227 +                   sizeof(struct crypto_report_acomp), &racomp))
228 +               goto nla_put_failure;
229 +       return 0;
230 +
231 +nla_put_failure:
232 +       return -EMSGSIZE;
233 +}
234 +#else
235 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
236 +{
237 +       return -ENOSYS;
238 +}
239 +#endif
240 +
241 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
242 +       __attribute__ ((unused));
243 +
244 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
245 +{
246 +       seq_puts(m, "type         : acomp\n");
247 +}
248 +
249 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
250 +{
251 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
252 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
253 +
254 +       alg->exit(acomp);
255 +}
256 +
257 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
258 +{
259 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
260 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
261 +
262 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
263 +               return crypto_init_scomp_ops_async(tfm);
264 +
265 +       acomp->compress = alg->compress;
266 +       acomp->decompress = alg->decompress;
267 +       acomp->dst_free = alg->dst_free;
268 +       acomp->reqsize = alg->reqsize;
269 +
270 +       if (alg->exit)
271 +               acomp->base.exit = crypto_acomp_exit_tfm;
272 +
273 +       if (alg->init)
274 +               return alg->init(acomp);
275 +
276 +       return 0;
277 +}
278 +
279 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
280 +{
281 +       int extsize = crypto_alg_extsize(alg);
282 +
283 +       if (alg->cra_type != &crypto_acomp_type)
284 +               extsize += sizeof(struct crypto_scomp *);
285 +
286 +       return extsize;
287 +}
288 +
289 +static const struct crypto_type crypto_acomp_type = {
290 +       .extsize = crypto_acomp_extsize,
291 +       .init_tfm = crypto_acomp_init_tfm,
292 +#ifdef CONFIG_PROC_FS
293 +       .show = crypto_acomp_show,
294 +#endif
295 +       .report = crypto_acomp_report,
296 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
297 +       .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
298 +       .type = CRYPTO_ALG_TYPE_ACOMPRESS,
299 +       .tfmsize = offsetof(struct crypto_acomp, base),
300 +};
301 +
302 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
303 +                                       u32 mask)
304 +{
305 +       return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
306 +}
307 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
308 +
309 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
310 +{
311 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
312 +       struct acomp_req *req;
313 +
314 +       req = __acomp_request_alloc(acomp);
315 +       if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
316 +               return crypto_acomp_scomp_alloc_ctx(req);
317 +
318 +       return req;
319 +}
320 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
321 +
322 +void acomp_request_free(struct acomp_req *req)
323 +{
324 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
325 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
326 +
327 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
328 +               crypto_acomp_scomp_free_ctx(req);
329 +
330 +       if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
331 +               acomp->dst_free(req->dst);
332 +               req->dst = NULL;
333 +       }
334 +
335 +       __acomp_request_free(req);
336 +}
337 +EXPORT_SYMBOL_GPL(acomp_request_free);
338 +
339 +int crypto_register_acomp(struct acomp_alg *alg)
340 +{
341 +       struct crypto_alg *base = &alg->base;
342 +
343 +       base->cra_type = &crypto_acomp_type;
344 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
345 +       base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
346 +
347 +       return crypto_register_alg(base);
348 +}
349 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
350 +
351 +int crypto_unregister_acomp(struct acomp_alg *alg)
352 +{
353 +       return crypto_unregister_alg(&alg->base);
354 +}
355 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
356 +
357 +MODULE_LICENSE("GPL");
358 +MODULE_DESCRIPTION("Asynchronous compression type");
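
On the provider side, a driver that completes requests asynchronously registers a struct acomp_alg directly. A bare-bones, hypothetical skeleton follows; the demo_* names and the "deflate" cra_name are placeholders, and a real driver would queue req->src/req->dst to its engine rather than return immediately.

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int demo_acomp_compress(struct acomp_req *req)
{
        /*
         * A real driver maps req->src (req->slen bytes) and req->dst
         * (req->dlen bytes) for its engine and returns -EINPROGRESS;
         * on completion it sets req->dlen to the produced length and
         * calls acomp_request_complete(req, err).
         */
        return -EINPROGRESS;
}

static int demo_acomp_decompress(struct acomp_req *req)
{
        return -EINPROGRESS;
}

static struct acomp_alg demo_acomp = {
        .compress   = demo_acomp_compress,
        .decompress = demo_acomp_decompress,
        .base = {
                .cra_name        = "deflate",           /* placeholder */
                .cra_driver_name = "deflate-demo",
                .cra_priority    = 300,
                .cra_module      = THIS_MODULE,
        },
};

/* crypto_register_acomp() fills in cra_type and the type flags itself */
static int __init demo_acomp_init(void)
{
        return crypto_register_acomp(&demo_acomp);
}

static void __exit demo_acomp_exit(void)
{
        crypto_unregister_acomp(&demo_acomp);
}

module_init(demo_acomp_init);
module_exit(demo_acomp_exit);
MODULE_LICENSE("GPL");
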
359 diff --git a/crypto/algboss.c b/crypto/algboss.c
360 index 4bde25d6..ccb85e17 100644
361 --- a/crypto/algboss.c
362 +++ b/crypto/algboss.c
363 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
364         memcpy(param->alg, alg->cra_name, sizeof(param->alg));
365         type = alg->cra_flags;
366  
367 -       /* This piece of crap needs to disappear into per-type test hooks. */
368 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
369 -       type |= CRYPTO_ALG_TESTED;
370 -#else
371 -       if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
372 -             CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
373 -           ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
374 -            CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
375 -                                        alg->cra_ablkcipher.ivsize))
376 +       /* Do not test internal algorithms. */
377 +       if (type & CRYPTO_ALG_INTERNAL)
378                 type |= CRYPTO_ALG_TESTED;
379 -#endif
380  
381         param->type = type;
382  
383 diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
384 index 1c570548..a90404a0 100644
385 --- a/crypto/crypto_user.c
386 +++ b/crypto/crypto_user.c
387 @@ -112,6 +112,21 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
388         return -EMSGSIZE;
389  }
390  
391 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
392 +{
393 +       struct crypto_report_acomp racomp;
394 +
395 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
396 +
397 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
398 +                   sizeof(struct crypto_report_acomp), &racomp))
399 +               goto nla_put_failure;
400 +       return 0;
401 +
402 +nla_put_failure:
403 +       return -EMSGSIZE;
404 +}
405 +
406  static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
407  {
408         struct crypto_report_akcipher rakcipher;
409 @@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
410                         goto nla_put_failure;
411  
412                 break;
413 +       case CRYPTO_ALG_TYPE_ACOMPRESS:
414 +               if (crypto_report_acomp(skb, alg))
415 +                       goto nla_put_failure;
416  
417 +               break;
418         case CRYPTO_ALG_TYPE_AKCIPHER:
419                 if (crypto_report_akcipher(skb, alg))
420                         goto nla_put_failure;
421 diff --git a/crypto/scompress.c b/crypto/scompress.c
422 new file mode 100644
423 index 00000000..35e396d1
424 --- /dev/null
425 +++ b/crypto/scompress.c
426 @@ -0,0 +1,356 @@
427 +/*
428 + * Synchronous Compression operations
429 + *
430 + * Copyright 2015 LG Electronics Inc.
431 + * Copyright (c) 2016, Intel Corporation
432 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
433 + *
434 + * This program is free software; you can redistribute it and/or modify it
435 + * under the terms of the GNU General Public License as published by the Free
436 + * Software Foundation; either version 2 of the License, or (at your option)
437 + * any later version.
438 + *
439 + */
440 +#include <linux/errno.h>
441 +#include <linux/kernel.h>
442 +#include <linux/module.h>
443 +#include <linux/seq_file.h>
444 +#include <linux/slab.h>
445 +#include <linux/string.h>
446 +#include <linux/crypto.h>
447 +#include <linux/vmalloc.h>
448 +#include <crypto/algapi.h>
449 +#include <linux/cryptouser.h>
450 +#include <net/netlink.h>
451 +#include <linux/scatterlist.h>
452 +#include <crypto/scatterwalk.h>
453 +#include <crypto/internal/acompress.h>
454 +#include <crypto/internal/scompress.h>
455 +#include "internal.h"
456 +
457 +static const struct crypto_type crypto_scomp_type;
458 +static void * __percpu *scomp_src_scratches;
459 +static void * __percpu *scomp_dst_scratches;
460 +static int scomp_scratch_users;
461 +static DEFINE_MUTEX(scomp_lock);
462 +
463 +#ifdef CONFIG_NET
464 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
465 +{
466 +       struct crypto_report_comp rscomp;
467 +
468 +       strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
469 +
470 +       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
471 +                   sizeof(struct crypto_report_comp), &rscomp))
472 +               goto nla_put_failure;
473 +       return 0;
474 +
475 +nla_put_failure:
476 +       return -EMSGSIZE;
477 +}
478 +#else
479 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
480 +{
481 +       return -ENOSYS;
482 +}
483 +#endif
484 +
485 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
486 +       __attribute__ ((unused));
487 +
488 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
489 +{
490 +       seq_puts(m, "type         : scomp\n");
491 +}
492 +
493 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
494 +{
495 +       return 0;
496 +}
497 +
498 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
499 +{
500 +       int i;
501 +
502 +       if (!scratches)
503 +               return;
504 +
505 +       for_each_possible_cpu(i)
506 +               vfree(*per_cpu_ptr(scratches, i));
507 +
508 +       free_percpu(scratches);
509 +}
510 +
511 +static void * __percpu *crypto_scomp_alloc_scratches(void)
512 +{
513 +       void * __percpu *scratches;
514 +       int i;
515 +
516 +       scratches = alloc_percpu(void *);
517 +       if (!scratches)
518 +               return NULL;
519 +
520 +       for_each_possible_cpu(i) {
521 +               void *scratch;
522 +
523 +               scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
524 +               if (!scratch)
525 +                       goto error;
526 +               *per_cpu_ptr(scratches, i) = scratch;
527 +       }
528 +
529 +       return scratches;
530 +
531 +error:
532 +       crypto_scomp_free_scratches(scratches);
533 +       return NULL;
534 +}
535 +
536 +static void crypto_scomp_free_all_scratches(void)
537 +{
538 +       if (!--scomp_scratch_users) {
539 +               crypto_scomp_free_scratches(scomp_src_scratches);
540 +               crypto_scomp_free_scratches(scomp_dst_scratches);
541 +               scomp_src_scratches = NULL;
542 +               scomp_dst_scratches = NULL;
543 +       }
544 +}
545 +
546 +static int crypto_scomp_alloc_all_scratches(void)
547 +{
548 +       if (!scomp_scratch_users++) {
549 +               scomp_src_scratches = crypto_scomp_alloc_scratches();
550 +               if (!scomp_src_scratches)
551 +                       return -ENOMEM;
552 +               scomp_dst_scratches = crypto_scomp_alloc_scratches();
553 +               if (!scomp_dst_scratches)
554 +                       return -ENOMEM;
555 +       }
556 +       return 0;
557 +}
558 +
559 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
560 +{
561 +       int i, n;
562 +       struct page *page;
563 +
564 +       if (!sgl)
565 +               return;
566 +
567 +       n = sg_nents(sgl);
568 +       for_each_sg(sgl, sgl, n, i) {
569 +               page = sg_page(sgl);
570 +               if (page)
571 +                       __free_page(page);
572 +       }
573 +
574 +       kfree(sgl);
575 +}
576 +
577 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
578 +{
579 +       struct scatterlist *sgl;
580 +       struct page *page;
581 +       int i, n;
582 +
583 +       n = ((size - 1) >> PAGE_SHIFT) + 1;
584 +
585 +       sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
586 +       if (!sgl)
587 +               return NULL;
588 +
589 +       sg_init_table(sgl, n);
590 +
591 +       for (i = 0; i < n; i++) {
592 +               page = alloc_page(gfp);
593 +               if (!page)
594 +                       goto err;
595 +               sg_set_page(sgl + i, page, PAGE_SIZE, 0);
596 +       }
597 +
598 +       return sgl;
599 +
600 +err:
601 +       sg_mark_end(sgl + i);
602 +       crypto_scomp_sg_free(sgl);
603 +       return NULL;
604 +}
605 +
606 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
607 +{
608 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
609 +       void **tfm_ctx = acomp_tfm_ctx(tfm);
610 +       struct crypto_scomp *scomp = *tfm_ctx;
611 +       void **ctx = acomp_request_ctx(req);
612 +       const int cpu = get_cpu();
613 +       u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
614 +       u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
615 +       int ret;
616 +
617 +       if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
618 +               ret = -EINVAL;
619 +               goto out;
620 +       }
621 +
622 +       if (req->dst && !req->dlen) {
623 +               ret = -EINVAL;
624 +               goto out;
625 +       }
626 +
627 +       if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
628 +               req->dlen = SCOMP_SCRATCH_SIZE;
629 +
630 +       scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
631 +       if (dir)
632 +               ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
633 +                                           scratch_dst, &req->dlen, *ctx);
634 +       else
635 +               ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
636 +                                             scratch_dst, &req->dlen, *ctx);
637 +       if (!ret) {
638 +               if (!req->dst) {
639 +                       req->dst = crypto_scomp_sg_alloc(req->dlen,
640 +                                  req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
641 +                                  GFP_KERNEL : GFP_ATOMIC);
642 +                       if (!req->dst) {
643 +                               ret = -ENOMEM; goto out; }
644 +               }
645 +               scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
646 +                                        1);
647 +       }
648 +out:
649 +       put_cpu();
650 +       return ret;
651 +}
652 +
653 +static int scomp_acomp_compress(struct acomp_req *req)
654 +{
655 +       return scomp_acomp_comp_decomp(req, 1);
656 +}
657 +
658 +static int scomp_acomp_decompress(struct acomp_req *req)
659 +{
660 +       return scomp_acomp_comp_decomp(req, 0);
661 +}
662 +
663 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
664 +{
665 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
666 +
667 +       crypto_free_scomp(*ctx);
668 +}
669 +
670 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
671 +{
672 +       struct crypto_alg *calg = tfm->__crt_alg;
673 +       struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
674 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
675 +       struct crypto_scomp *scomp;
676 +
677 +       if (!crypto_mod_get(calg))
678 +               return -EAGAIN;
679 +
680 +       scomp = crypto_create_tfm(calg, &crypto_scomp_type);
681 +       if (IS_ERR(scomp)) {
682 +               crypto_mod_put(calg);
683 +               return PTR_ERR(scomp);
684 +       }
685 +
686 +       *ctx = scomp;
687 +       tfm->exit = crypto_exit_scomp_ops_async;
688 +
689 +       crt->compress = scomp_acomp_compress;
690 +       crt->decompress = scomp_acomp_decompress;
691 +       crt->dst_free = crypto_scomp_sg_free;
692 +       crt->reqsize = sizeof(void *);
693 +
694 +       return 0;
695 +}
696 +
697 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
698 +{
699 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
700 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
701 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
702 +       struct crypto_scomp *scomp = *tfm_ctx;
703 +       void *ctx;
704 +
705 +       ctx = crypto_scomp_alloc_ctx(scomp);
706 +       if (IS_ERR(ctx)) {
707 +               kfree(req);
708 +               return NULL;
709 +       }
710 +
711 +       *req->__ctx = ctx;
712 +
713 +       return req;
714 +}
715 +
716 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
717 +{
718 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
719 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
720 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
721 +       struct crypto_scomp *scomp = *tfm_ctx;
722 +       void *ctx = *req->__ctx;
723 +
724 +       if (ctx)
725 +               crypto_scomp_free_ctx(scomp, ctx);
726 +}
727 +
728 +static const struct crypto_type crypto_scomp_type = {
729 +       .extsize = crypto_alg_extsize,
730 +       .init_tfm = crypto_scomp_init_tfm,
731 +#ifdef CONFIG_PROC_FS
732 +       .show = crypto_scomp_show,
733 +#endif
734 +       .report = crypto_scomp_report,
735 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
736 +       .maskset = CRYPTO_ALG_TYPE_MASK,
737 +       .type = CRYPTO_ALG_TYPE_SCOMPRESS,
738 +       .tfmsize = offsetof(struct crypto_scomp, base),
739 +};
740 +
741 +int crypto_register_scomp(struct scomp_alg *alg)
742 +{
743 +       struct crypto_alg *base = &alg->base;
744 +       int ret = -ENOMEM;
745 +
746 +       mutex_lock(&scomp_lock);
747 +       if (crypto_scomp_alloc_all_scratches())
748 +               goto error;
749 +
750 +       base->cra_type = &crypto_scomp_type;
751 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
752 +       base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
753 +
754 +       ret = crypto_register_alg(base);
755 +       if (ret)
756 +               goto error;
757 +
758 +       mutex_unlock(&scomp_lock);
759 +       return ret;
760 +
761 +error:
762 +       crypto_scomp_free_all_scratches();
763 +       mutex_unlock(&scomp_lock);
764 +       return ret;
765 +}
766 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
767 +
768 +int crypto_unregister_scomp(struct scomp_alg *alg)
769 +{
770 +       int ret;
771 +
772 +       mutex_lock(&scomp_lock);
773 +       ret = crypto_unregister_alg(&alg->base);
774 +       crypto_scomp_free_all_scratches();
775 +       mutex_unlock(&scomp_lock);
776 +
777 +       return ret;
778 +}
779 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
780 +
781 +MODULE_LICENSE("GPL");
782 +MODULE_DESCRIPTION("Synchronous compression type");
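
Synchronous providers instead register a struct scomp_alg and get the acomp front end for free through crypto_init_scomp_ops_async() above. A minimal, hypothetical skeleton (the pass-through "compression", the 128-byte context and all demo_* names are placeholders):

#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *demo_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
        void *ctx = kzalloc(128, GFP_KERNEL);   /* per-request state */

        return ctx ? ctx : ERR_PTR(-ENOMEM);
}

static void demo_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
        kfree(ctx);
}

static int demo_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
                               unsigned int slen, u8 *dst,
                               unsigned int *dlen, void *ctx)
{
        if (*dlen < slen)
                return -ENOSPC;
        memcpy(dst, src, slen);                 /* stand-in for a real algorithm */
        *dlen = slen;
        return 0;
}

static struct scomp_alg demo_scomp = {
        .alloc_ctx  = demo_scomp_alloc_ctx,
        .free_ctx   = demo_scomp_free_ctx,
        .compress   = demo_scomp_compress,
        .decompress = demo_scomp_compress,      /* pass-through both ways */
        .base = {
                .cra_name        = "demo",
                .cra_driver_name = "demo-generic",
                .cra_module      = THIS_MODULE,
        },
};

static int __init demo_scomp_init(void)
{
        return crypto_register_scomp(&demo_scomp);
}

static void __exit demo_scomp_exit(void)
{
        crypto_unregister_scomp(&demo_scomp);
}

module_init(demo_scomp_init);
module_exit(demo_scomp_exit);
MODULE_LICENSE("GPL");
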
783 diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
784 index ae22f05d..bbb35eed 100644
785 --- a/crypto/tcrypt.c
786 +++ b/crypto/tcrypt.c
787 @@ -74,7 +74,7 @@ static char *check[] = {
788         "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
789         "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
790         "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
791 -       NULL
792 +       "rsa", NULL
793  };
794  
795  struct tcrypt_result {
796 @@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
797                 ret += tcrypt_test("hmac(sha3-512)");
798                 break;
799  
800 +       case 115:
801 +               ret += tcrypt_test("rsa");
802 +               break;
803 +
804         case 150:
805                 ret += tcrypt_test("ansi_cprng");
806                 break;
807 @@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
808         case 190:
809                 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
810                 break;
811 +       case 191:
812 +               ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
813 +               break;
814         case 200:
815                 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
816                                 speed_template_16_24_32);
817 @@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
818                 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
819                                 speed_template_32_40_48);
820                 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
821 -                               speed_template_32_48_64);
822 +                               speed_template_32_64);
823                 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
824 -                               speed_template_32_48_64);
825 +                               speed_template_32_64);
826                 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
827                                 speed_template_16_24_32);
828                 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
829 @@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
830                 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
831                                    speed_template_32_40_48);
832                 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
833 -                                  speed_template_32_48_64);
834 +                                  speed_template_32_64);
835                 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
836 -                                  speed_template_32_48_64);
837 +                                  speed_template_32_64);
838                 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
839                                    speed_template_16_24_32);
840                 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
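
With the new tcrypt cases wired up, the vectors can be exercised from the shell: "modprobe tcrypt mode=115" runs the RSA self-test and "modprobe tcrypt mode=191" the tls10(hmac(sha1),cbc(aes)) test. As usual, tcrypt deliberately fails to stay loaded once the tests have run, so modprobe reporting an error afterwards is expected; the results land in the kernel log.
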
841 diff --git a/crypto/testmgr.c b/crypto/testmgr.c
842 index 62dffa00..73d91fba 100644
843 --- a/crypto/testmgr.c
844 +++ b/crypto/testmgr.c
845 @@ -33,6 +33,7 @@
846  #include <crypto/drbg.h>
847  #include <crypto/akcipher.h>
848  #include <crypto/kpp.h>
849 +#include <crypto/acompress.h>
850  
851  #include "internal.h"
852  
853 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
854   */
855  #define IDX1           32
856  #define IDX2           32400
857 -#define IDX3           1
858 +#define IDX3           1511
859  #define IDX4           8193
860  #define IDX5           22222
861  #define IDX6           17101
862 @@ -82,47 +83,54 @@ struct tcrypt_result {
863  
864  struct aead_test_suite {
865         struct {
866 -               struct aead_testvec *vecs;
867 +               const struct aead_testvec *vecs;
868                 unsigned int count;
869         } enc, dec;
870  };
871  
872  struct cipher_test_suite {
873         struct {
874 -               struct cipher_testvec *vecs;
875 +               const struct cipher_testvec *vecs;
876                 unsigned int count;
877         } enc, dec;
878  };
879  
880  struct comp_test_suite {
881         struct {
882 -               struct comp_testvec *vecs;
883 +               const struct comp_testvec *vecs;
884                 unsigned int count;
885         } comp, decomp;
886  };
887  
888  struct hash_test_suite {
889 -       struct hash_testvec *vecs;
890 +       const struct hash_testvec *vecs;
891         unsigned int count;
892  };
893  
894  struct cprng_test_suite {
895 -       struct cprng_testvec *vecs;
896 +       const struct cprng_testvec *vecs;
897         unsigned int count;
898  };
899  
900  struct drbg_test_suite {
901 -       struct drbg_testvec *vecs;
902 +       const struct drbg_testvec *vecs;
903         unsigned int count;
904  };
905  
906 +struct tls_test_suite {
907 +       struct {
908 +               struct tls_testvec *vecs;
909 +               unsigned int count;
910 +       } enc, dec;
911 +};
912 +
913  struct akcipher_test_suite {
914 -       struct akcipher_testvec *vecs;
915 +       const struct akcipher_testvec *vecs;
916         unsigned int count;
917  };
918  
919  struct kpp_test_suite {
920 -       struct kpp_testvec *vecs;
921 +       const struct kpp_testvec *vecs;
922         unsigned int count;
923  };
924  
925 @@ -139,12 +147,14 @@ struct alg_test_desc {
926                 struct hash_test_suite hash;
927                 struct cprng_test_suite cprng;
928                 struct drbg_test_suite drbg;
929 +               struct tls_test_suite tls;
930                 struct akcipher_test_suite akcipher;
931                 struct kpp_test_suite kpp;
932         } suite;
933  };
934  
935 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
936 +static const unsigned int IDX[8] = {
937 +       IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
938  
939  static void hexdump(unsigned char *buf, unsigned int len)
940  {
941 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
942  }
943  
944  static int ahash_partial_update(struct ahash_request **preq,
945 -       struct crypto_ahash *tfm, struct hash_testvec *template,
946 +       struct crypto_ahash *tfm, const struct hash_testvec *template,
947         void *hash_buff, int k, int temp, struct scatterlist *sg,
948         const char *algo, char *result, struct tcrypt_result *tresult)
949  {
950 @@ -259,11 +269,12 @@ static int ahash_partial_update(struct ahash_request **preq,
951         return ret;
952  }
953  
954 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
955 -                      unsigned int tcount, bool use_digest,
956 -                      const int align_offset)
957 +static int __test_hash(struct crypto_ahash *tfm,
958 +                      const struct hash_testvec *template, unsigned int tcount,
959 +                      bool use_digest, const int align_offset)
960  {
961         const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
962 +       size_t digest_size = crypto_ahash_digestsize(tfm);
963         unsigned int i, j, k, temp;
964         struct scatterlist sg[8];
965         char *result;
966 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
967         char *xbuf[XBUFSIZE];
968         int ret = -ENOMEM;
969  
970 -       result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
971 +       result = kmalloc(digest_size, GFP_KERNEL);
972         if (!result)
973                 return ret;
974         key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
975 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
976                         goto out;
977  
978                 j++;
979 -               memset(result, 0, MAX_DIGEST_SIZE);
980 +               memset(result, 0, digest_size);
981  
982                 hash_buff = xbuf[0];
983                 hash_buff += align_offset;
984 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
985                         continue;
986  
987                 j++;
988 -               memset(result, 0, MAX_DIGEST_SIZE);
989 +               memset(result, 0, digest_size);
990  
991                 temp = 0;
992                 sg_init_table(sg, template[i].np);
993 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
994                         continue;
995  
996                 j++;
997 -               memset(result, 0, MAX_DIGEST_SIZE);
998 +               memset(result, 0, digest_size);
999  
1000                 ret = -EINVAL;
1001                 hash_buff = xbuf[0];
1002 @@ -536,7 +547,8 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1003         return ret;
1004  }
1005  
1006 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1007 +static int test_hash(struct crypto_ahash *tfm,
1008 +                    const struct hash_testvec *template,
1009                      unsigned int tcount, bool use_digest)
1010  {
1011         unsigned int alignmask;
1012 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
1013  }
1014  
1015  static int __test_aead(struct crypto_aead *tfm, int enc,
1016 -                      struct aead_testvec *template, unsigned int tcount,
1017 +                      const struct aead_testvec *template, unsigned int tcount,
1018                        const bool diff_dst, const int align_offset)
1019  {
1020         const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1021 @@ -955,7 +967,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
1022  }
1023  
1024  static int test_aead(struct crypto_aead *tfm, int enc,
1025 -                    struct aead_testvec *template, unsigned int tcount)
1026 +                    const struct aead_testvec *template, unsigned int tcount)
1027  {
1028         unsigned int alignmask;
1029         int ret;
1030 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead *tfm, int enc,
1031         return 0;
1032  }
1033  
1034 +static int __test_tls(struct crypto_aead *tfm, int enc,
1035 +                     struct tls_testvec *template, unsigned int tcount,
1036 +                     const bool diff_dst)
1037 +{
1038 +       const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1039 +       unsigned int i, k, authsize;
1040 +       char *q;
1041 +       struct aead_request *req;
1042 +       struct scatterlist *sg;
1043 +       struct scatterlist *sgout;
1044 +       const char *e, *d;
1045 +       struct tcrypt_result result;
1046 +       void *input;
1047 +       void *output;
1048 +       void *assoc;
1049 +       char *iv;
1050 +       char *key;
1051 +       char *xbuf[XBUFSIZE];
1052 +       char *xoutbuf[XBUFSIZE];
1053 +       char *axbuf[XBUFSIZE];
1054 +       int ret = -ENOMEM;
1055 +
1056 +       if (testmgr_alloc_buf(xbuf))
1057 +               goto out_noxbuf;
1058 +
1059 +       if (diff_dst && testmgr_alloc_buf(xoutbuf))
1060 +               goto out_nooutbuf;
1061 +
1062 +       if (testmgr_alloc_buf(axbuf))
1063 +               goto out_noaxbuf;
1064 +
1065 +       iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1066 +       if (!iv)
1067 +               goto out_noiv;
1068 +
1069 +       key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1070 +       if (!key)
1071 +               goto out_nokey;
1072 +
1073 +       sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1074 +       if (!sg)
1075 +               goto out_nosg;
1076 +
1077 +       sgout = sg + 8;
1078 +
1079 +       d = diff_dst ? "-ddst" : "";
1080 +       e = enc ? "encryption" : "decryption";
1081 +
1082 +       init_completion(&result.completion);
1083 +
1084 +       req = aead_request_alloc(tfm, GFP_KERNEL);
1085 +       if (!req) {
1086 +               pr_err("alg: tls%s: Failed to allocate request for %s\n",
1087 +                      d, algo);
1088 +               goto out;
1089 +       }
1090 +
1091 +       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1092 +                                 tcrypt_complete, &result);
1093 +
1094 +       for (i = 0; i < tcount; i++) {
1095 +               input = xbuf[0];
1096 +               assoc = axbuf[0];
1097 +
1098 +               ret = -EINVAL;
1099 +               if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1100 +                           template[i].alen > PAGE_SIZE))
1101 +                       goto out;
1102 +
1103 +               memcpy(assoc, template[i].assoc, template[i].alen);
1104 +               memcpy(input, template[i].input, template[i].ilen);
1105 +
1106 +               if (template[i].iv)
1107 +                       memcpy(iv, template[i].iv, MAX_IVLEN);
1108 +               else
1109 +                       memset(iv, 0, MAX_IVLEN);
1110 +
1111 +               crypto_aead_clear_flags(tfm, ~0);
1112 +
1113 +               if (template[i].klen > MAX_KEYLEN) {
1114 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: key size %d > %d\n",
1115 +                              d, i, algo, template[i].klen, MAX_KEYLEN);
1116 +                       ret = -EINVAL;
1117 +                       goto out;
1118 +               }
1119 +               memcpy(key, template[i].key, template[i].klen);
1120 +
1121 +               ret = crypto_aead_setkey(tfm, key, template[i].klen);
1122 +               if (!ret == template[i].fail) {
1123 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1124 +                              d, i, algo, crypto_aead_get_flags(tfm));
1125 +                       goto out;
1126 +               } else if (ret)
1127 +                       continue;
1128 +
1129 +               authsize = 20;
1130 +               ret = crypto_aead_setauthsize(tfm, authsize);
1131 +               if (ret) {
1132 +                       pr_err("alg: tls%s: Failed to set authsize to %u on test %d for %s\n",
1133 +                              d, authsize, i, algo);
1134 +                       goto out;
1135 +               }
1136 +
1137 +               k = !!template[i].alen;
1138 +               sg_init_table(sg, k + 1);
1139 +               sg_set_buf(&sg[0], assoc, template[i].alen);
1140 +               sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1141 +                                          template[i].ilen));
1142 +               output = input;
1143 +
1144 +               if (diff_dst) {
1145 +                       sg_init_table(sgout, k + 1);
1146 +                       sg_set_buf(&sgout[0], assoc, template[i].alen);
1147 +
1148 +                       output = xoutbuf[0];
1149 +                       sg_set_buf(&sgout[k], output,
1150 +                                  (enc ? template[i].rlen : template[i].ilen));
1151 +               }
1152 +
1153 +               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1154 +                                      template[i].ilen, iv);
1155 +
1156 +               aead_request_set_ad(req, template[i].alen);
1157 +
1158 +               ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1159 +
1160 +               switch (ret) {
1161 +               case 0:
1162 +                       if (template[i].novrfy) {
1163 +                               /* verification was supposed to fail */
1164 +                               pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1165 +                                      d, e, i, algo);
1166 +                               /* so really, we got a bad message */
1167 +                               ret = -EBADMSG;
1168 +                               goto out;
1169 +                       }
1170 +                       break;
1171 +               case -EINPROGRESS:
1172 +               case -EBUSY:
1173 +                       wait_for_completion(&result.completion);
1174 +                       reinit_completion(&result.completion);
1175 +                       ret = result.err;
1176 +                       if (!ret)
1177 +                               break;
1178 +               case -EBADMSG:
1179 +                       /* verification failure was expected */
1180 +                       if (template[i].novrfy)
1181 +                               continue;
1182 +                       /* fall through */
1183 +               default:
1184 +                       pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1185 +                              d, e, i, algo, -ret);
1186 +                       goto out;
1187 +               }
1188 +
1189 +               q = output;
1190 +               if (memcmp(q, template[i].result, template[i].rlen)) {
1191 +                       pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1192 +                              d, i, e, algo);
1193 +                       hexdump(q, template[i].rlen);
1194 +                       pr_err("should be:\n");
1195 +                       hexdump(template[i].result, template[i].rlen);
1196 +                       ret = -EINVAL;
1197 +                       goto out;
1198 +               }
1199 +       }
1200 +
1201 +out:
1202 +       aead_request_free(req);
1203 +
1204 +       kfree(sg);
1205 +out_nosg:
1206 +       kfree(key);
1207 +out_nokey:
1208 +       kfree(iv);
1209 +out_noiv:
1210 +       testmgr_free_buf(axbuf);
1211 +out_noaxbuf:
1212 +       if (diff_dst)
1213 +               testmgr_free_buf(xoutbuf);
1214 +out_nooutbuf:
1215 +       testmgr_free_buf(xbuf);
1216 +out_noxbuf:
1217 +       return ret;
1218 +}
1219 +
1220 +static int test_tls(struct crypto_aead *tfm, int enc,
1221 +                   struct tls_testvec *template, unsigned int tcount)
1222 +{
1223 +       int ret;
1224 +       /* test 'dst == src' case */
1225 +       ret = __test_tls(tfm, enc, template, tcount, false);
1226 +       if (ret)
1227 +               return ret;
1228 +       /* test 'dst != src' case */
1229 +       return __test_tls(tfm, enc, template, tcount, true);
1230 +}
1231 +
1232 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1233 +                       u32 type, u32 mask)
1234 +{
1235 +       struct crypto_aead *tfm;
1236 +       int err = 0;
1237 +
1238 +       tfm = crypto_alloc_aead(driver, type, mask);
1239 +       if (IS_ERR(tfm)) {
1240 +               pr_err("alg: tls: Failed to load transform for %s: %ld\n",
1241 +                      driver, PTR_ERR(tfm));
1242 +               return PTR_ERR(tfm);
1243 +       }
1244 +
1245 +       if (desc->suite.tls.enc.vecs) {
1246 +               err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1247 +                              desc->suite.tls.enc.count);
1248 +               if (err)
1249 +                       goto out;
1250 +       }
1251 +
1252 +       if (!err && desc->suite.tls.dec.vecs)
1253 +               err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1254 +                              desc->suite.tls.dec.count);
1255 +
1256 +out:
1257 +       crypto_free_aead(tfm);
1258 +       return err;
1259 +}
1260 +
1261  static int test_cipher(struct crypto_cipher *tfm, int enc,
1262 -                      struct cipher_testvec *template, unsigned int tcount)
1263 +                      const struct cipher_testvec *template,
1264 +                      unsigned int tcount)
1265  {
1266         const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1267         unsigned int i, j, k;
1268 @@ -1066,7 +1306,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
1269  }
1270  
1271  static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1272 -                          struct cipher_testvec *template, unsigned int tcount,
1273 +                          const struct cipher_testvec *template,
1274 +                          unsigned int tcount,
1275                            const bool diff_dst, const int align_offset)
1276  {
1277         const char *algo =
1278 @@ -1330,7 +1571,8 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1279  }
1280  
1281  static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1282 -                        struct cipher_testvec *template, unsigned int tcount)
1283 +                        const struct cipher_testvec *template,
1284 +                        unsigned int tcount)
1285  {
1286         unsigned int alignmask;
1287         int ret;
1288 @@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1289         return 0;
1290  }
1291  
1292 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1293 -                    struct comp_testvec *dtemplate, int ctcount, int dtcount)
1294 +static int test_comp(struct crypto_comp *tfm,
1295 +                    const struct comp_testvec *ctemplate,
1296 +                    const struct comp_testvec *dtemplate,
1297 +                    int ctcount, int dtcount)
1298  {
1299         const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1300         unsigned int i;
1301 @@ -1442,7 +1686,154 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1302         return ret;
1303  }
1304  
1305 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1306 +static int test_acomp(struct crypto_acomp *tfm,
1307 +                     const struct comp_testvec *ctemplate,
1308 +                     const struct comp_testvec *dtemplate,
1309 +                     int ctcount, int dtcount)
1310 +{
1311 +       const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1312 +       unsigned int i;
1313 +       char *output;
1314 +       int ret;
1315 +       struct scatterlist src, dst;
1316 +       struct acomp_req *req;
1317 +       struct tcrypt_result result;
1318 +
1319 +       output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1320 +       if (!output)
1321 +               return -ENOMEM;
1322 +
1323 +       for (i = 0; i < ctcount; i++) {
1324 +               unsigned int dlen = COMP_BUF_SIZE;
1325 +               int ilen = ctemplate[i].inlen;
1326 +               void *input_vec;
1327 +
1328 +               input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1329 +               if (!input_vec) {
1330 +                       ret = -ENOMEM;
1331 +                       goto out;
1332 +               }
1333 +
1334 +               memset(output, 0, dlen);
1335 +               init_completion(&result.completion);
1336 +               sg_init_one(&src, input_vec, ilen);
1337 +               sg_init_one(&dst, output, dlen);
1338 +
1339 +               req = acomp_request_alloc(tfm);
1340 +               if (!req) {
1341 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1342 +                              algo);
1343 +                       kfree(input_vec);
1344 +                       ret = -ENOMEM;
1345 +                       goto out;
1346 +               }
1347 +
1348 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1349 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1350 +                                          tcrypt_complete, &result);
1351 +
1352 +               ret = wait_async_op(&result, crypto_acomp_compress(req));
1353 +               if (ret) {
1354 +                       pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1355 +                              i + 1, algo, -ret);
1356 +                       kfree(input_vec);
1357 +                       acomp_request_free(req);
1358 +                       goto out;
1359 +               }
1360 +
1361 +               if (req->dlen != ctemplate[i].outlen) {
1362 +                       pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1363 +                              i + 1, algo, req->dlen);
1364 +                       ret = -EINVAL;
1365 +                       kfree(input_vec);
1366 +                       acomp_request_free(req);
1367 +                       goto out;
1368 +               }
1369 +
1370 +               if (memcmp(output, ctemplate[i].output, req->dlen)) {
1371 +                       pr_err("alg: acomp: Compression test %d failed for %s\n",
1372 +                              i + 1, algo);
1373 +                       hexdump(output, req->dlen);
1374 +                       ret = -EINVAL;
1375 +                       kfree(input_vec);
1376 +                       acomp_request_free(req);
1377 +                       goto out;
1378 +               }
1379 +
1380 +               kfree(input_vec);
1381 +               acomp_request_free(req);
1382 +       }
1383 +
1384 +       for (i = 0; i < dtcount; i++) {
1385 +               unsigned int dlen = COMP_BUF_SIZE;
1386 +               int ilen = dtemplate[i].inlen;
1387 +               void *input_vec;
1388 +
1389 +               input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1390 +               if (!input_vec) {
1391 +                       ret = -ENOMEM;
1392 +                       goto out;
1393 +               }
1394 +
1395 +               memset(output, 0, dlen);
1396 +               init_completion(&result.completion);
1397 +               sg_init_one(&src, input_vec, ilen);
1398 +               sg_init_one(&dst, output, dlen);
1399 +
1400 +               req = acomp_request_alloc(tfm);
1401 +               if (!req) {
1402 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1403 +                              algo);
1404 +                       kfree(input_vec);
1405 +                       ret = -ENOMEM;
1406 +                       goto out;
1407 +               }
1408 +
1409 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1410 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1411 +                                          tcrypt_complete, &result);
1412 +
1413 +               ret = wait_async_op(&result, crypto_acomp_decompress(req));
1414 +               if (ret) {
1415 +                       pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1416 +                              i + 1, algo, -ret);
1417 +                       kfree(input_vec);
1418 +                       acomp_request_free(req);
1419 +                       goto out;
1420 +               }
1421 +
1422 +               if (req->dlen != dtemplate[i].outlen) {
1423 +                       pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1424 +                              i + 1, algo, req->dlen);
1425 +                       ret = -EINVAL;
1426 +                       kfree(input_vec);
1427 +                       acomp_request_free(req);
1428 +                       goto out;
1429 +               }
1430 +
1431 +               if (memcmp(output, dtemplate[i].output, req->dlen)) {
1432 +                       pr_err("alg: acomp: Decompression test %d failed for %s\n",
1433 +                              i + 1, algo);
1434 +                       hexdump(output, req->dlen);
1435 +                       ret = -EINVAL;
1436 +                       kfree(input_vec);
1437 +                       acomp_request_free(req);
1438 +                       goto out;
1439 +               }
1440 +
1441 +               kfree(input_vec);
1442 +               acomp_request_free(req);
1443 +       }
1444 +
1445 +       ret = 0;
1446 +
1447 +out:
1448 +       kfree(output);
1449 +       return ret;
1450 +}
1451 +
1452 +static int test_cprng(struct crypto_rng *tfm,
1453 +                     const struct cprng_testvec *template,
1454                       unsigned int tcount)
1455  {
1456         const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1457 @@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
1458         struct crypto_aead *tfm;
1459         int err = 0;
1460  
1461 -       tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1462 +       tfm = crypto_alloc_aead(driver, type, mask);
1463         if (IS_ERR(tfm)) {
1464                 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1465                        "%ld\n", driver, PTR_ERR(tfm));
1466 @@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
1467         struct crypto_cipher *tfm;
1468         int err = 0;
1469  
1470 -       tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1471 +       tfm = crypto_alloc_cipher(driver, type, mask);
1472         if (IS_ERR(tfm)) {
1473                 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1474                        "%s: %ld\n", driver, PTR_ERR(tfm));
1475 @@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1476         struct crypto_skcipher *tfm;
1477         int err = 0;
1478  
1479 -       tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1480 +       tfm = crypto_alloc_skcipher(driver, type, mask);
1481         if (IS_ERR(tfm)) {
1482                 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1483                        "%s: %ld\n", driver, PTR_ERR(tfm));
1484 @@ -1593,22 +1984,38 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1485  static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1486                          u32 type, u32 mask)
1487  {
1488 -       struct crypto_comp *tfm;
1489 +       struct crypto_comp *comp;
1490 +       struct crypto_acomp *acomp;
1491         int err;
1492 +       u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1493 +
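+       /* Under CRYPTO_ALG_TYPE_ACOMPRESS_MASK, acomp implementations (and
+        * the scomp algorithms exposed through the acomp interface) resolve
+        * to CRYPTO_ALG_TYPE_ACOMPRESS and take the new async tester;
+        * everything else keeps the legacy crypto_comp path.
+        */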
1494 +       if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1495 +               acomp = crypto_alloc_acomp(driver, type, mask);
1496 +               if (IS_ERR(acomp)) {
1497 +                       pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1498 +                              driver, PTR_ERR(acomp));
1499 +                       return PTR_ERR(acomp);
1500 +               }
1501 +               err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1502 +                                desc->suite.comp.decomp.vecs,
1503 +                                desc->suite.comp.comp.count,
1504 +                                desc->suite.comp.decomp.count);
1505 +               crypto_free_acomp(acomp);
1506 +       } else {
1507 +               comp = crypto_alloc_comp(driver, type, mask);
1508 +               if (IS_ERR(comp)) {
1509 +                       pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1510 +                              driver, PTR_ERR(comp));
1511 +                       return PTR_ERR(comp);
1512 +               }
1513  
1514 -       tfm = crypto_alloc_comp(driver, type, mask);
1515 -       if (IS_ERR(tfm)) {
1516 -               printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1517 -                      "%ld\n", driver, PTR_ERR(tfm));
1518 -               return PTR_ERR(tfm);
1519 -       }
1520 -
1521 -       err = test_comp(tfm, desc->suite.comp.comp.vecs,
1522 -                       desc->suite.comp.decomp.vecs,
1523 -                       desc->suite.comp.comp.count,
1524 -                       desc->suite.comp.decomp.count);
1525 +               err = test_comp(comp, desc->suite.comp.comp.vecs,
1526 +                               desc->suite.comp.decomp.vecs,
1527 +                               desc->suite.comp.comp.count,
1528 +                               desc->suite.comp.decomp.count);
1529  
1530 -       crypto_free_comp(tfm);
1531 +               crypto_free_comp(comp);
1532 +       }
1533         return err;
1534  }
1535  
1536 @@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1537         struct crypto_ahash *tfm;
1538         int err;
1539  
1540 -       tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1541 +       tfm = crypto_alloc_ahash(driver, type, mask);
1542         if (IS_ERR(tfm)) {
1543                 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1544                        "%ld\n", driver, PTR_ERR(tfm));
1545 @@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
1546         if (err)
1547                 goto out;
1548  
1549 -       tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1550 +       tfm = crypto_alloc_shash(driver, type, mask);
1551         if (IS_ERR(tfm)) {
1552                 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1553                        "%ld\n", driver, PTR_ERR(tfm));
1554 @@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1555         struct crypto_rng *rng;
1556         int err;
1557  
1558 -       rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1559 +       rng = crypto_alloc_rng(driver, type, mask);
1560         if (IS_ERR(rng)) {
1561                 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1562                        "%ld\n", driver, PTR_ERR(rng));
1563 @@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1564  }
1565  
1566  
1567 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1568 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1569                           const char *driver, u32 type, u32 mask)
1570  {
1571         int ret = -EAGAIN;
1572 @@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1573         if (!buf)
1574                 return -ENOMEM;
1575  
1576 -       drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1577 +       drng = crypto_alloc_rng(driver, type, mask);
1578         if (IS_ERR(drng)) {
1579                 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1580                        "%s\n", driver);
1581 @@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1582         int err = 0;
1583         int pr = 0;
1584         int i = 0;
1585 -       struct drbg_testvec *template = desc->suite.drbg.vecs;
1586 +       const struct drbg_testvec *template = desc->suite.drbg.vecs;
1587         unsigned int tcount = desc->suite.drbg.count;
1588  
1589         if (0 == memcmp(driver, "drbg_pr_", 8))
1590 @@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1591  
1592  }
1593  
1594 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1595 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1596                        const char *alg)
1597  {
1598         struct kpp_request *req;
1599 @@ -1888,7 +2295,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1600  }
1601  
1602  static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1603 -                   struct kpp_testvec *vecs, unsigned int tcount)
1604 +                   const struct kpp_testvec *vecs, unsigned int tcount)
1605  {
1606         int ret, i;
1607  
1608 @@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1609         struct crypto_kpp *tfm;
1610         int err = 0;
1611  
1612 -       tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1613 +       tfm = crypto_alloc_kpp(driver, type, mask);
1614         if (IS_ERR(tfm)) {
1615                 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1616                        driver, PTR_ERR(tfm));
1617 @@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1618  }
1619  
1620  static int test_akcipher_one(struct crypto_akcipher *tfm,
1621 -                            struct akcipher_testvec *vecs)
1622 +                            const struct akcipher_testvec *vecs)
1623  {
1624         char *xbuf[XBUFSIZE];
1625         struct akcipher_request *req;
1626 @@ -2044,7 +2451,8 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
1627  }
1628  
1629  static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1630 -                        struct akcipher_testvec *vecs, unsigned int tcount)
1631 +                        const struct akcipher_testvec *vecs,
1632 +                        unsigned int tcount)
1633  {
1634         const char *algo =
1635                 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1636 @@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
1637         struct crypto_akcipher *tfm;
1638         int err = 0;
1639  
1640 -       tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1641 +       tfm = crypto_alloc_akcipher(driver, type, mask);
1642         if (IS_ERR(tfm)) {
1643                 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1644                        driver, PTR_ERR(tfm));
1645 @@ -2088,112 +2496,23 @@ static int alg_test_null(const struct alg_test_desc *desc,
1646         return 0;
1647  }
1648  
1649 +#define __VECS(tv)     { .vecs = tv, .count = ARRAY_SIZE(tv) }
1650 +
1651  /* Please keep this list sorted by algorithm name. */
1652  static const struct alg_test_desc alg_test_descs[] = {
1653         {
1654 -               .alg = "__cbc-cast5-avx",
1655 -               .test = alg_test_null,
1656 -       }, {
1657 -               .alg = "__cbc-cast6-avx",
1658 -               .test = alg_test_null,
1659 -       }, {
1660 -               .alg = "__cbc-serpent-avx",
1661 -               .test = alg_test_null,
1662 -       }, {
1663 -               .alg = "__cbc-serpent-avx2",
1664 -               .test = alg_test_null,
1665 -       }, {
1666 -               .alg = "__cbc-serpent-sse2",
1667 -               .test = alg_test_null,
1668 -       }, {
1669 -               .alg = "__cbc-twofish-avx",
1670 -               .test = alg_test_null,
1671 -       }, {
1672 -               .alg = "__driver-cbc-aes-aesni",
1673 -               .test = alg_test_null,
1674 -               .fips_allowed = 1,
1675 -       }, {
1676 -               .alg = "__driver-cbc-camellia-aesni",
1677 -               .test = alg_test_null,
1678 -       }, {
1679 -               .alg = "__driver-cbc-camellia-aesni-avx2",
1680 -               .test = alg_test_null,
1681 -       }, {
1682 -               .alg = "__driver-cbc-cast5-avx",
1683 -               .test = alg_test_null,
1684 -       }, {
1685 -               .alg = "__driver-cbc-cast6-avx",
1686 -               .test = alg_test_null,
1687 -       }, {
1688 -               .alg = "__driver-cbc-serpent-avx",
1689 -               .test = alg_test_null,
1690 -       }, {
1691 -               .alg = "__driver-cbc-serpent-avx2",
1692 -               .test = alg_test_null,
1693 -       }, {
1694 -               .alg = "__driver-cbc-serpent-sse2",
1695 -               .test = alg_test_null,
1696 -       }, {
1697 -               .alg = "__driver-cbc-twofish-avx",
1698 -               .test = alg_test_null,
1699 -       }, {
1700 -               .alg = "__driver-ecb-aes-aesni",
1701 -               .test = alg_test_null,
1702 -               .fips_allowed = 1,
1703 -       }, {
1704 -               .alg = "__driver-ecb-camellia-aesni",
1705 -               .test = alg_test_null,
1706 -       }, {
1707 -               .alg = "__driver-ecb-camellia-aesni-avx2",
1708 -               .test = alg_test_null,
1709 -       }, {
1710 -               .alg = "__driver-ecb-cast5-avx",
1711 -               .test = alg_test_null,
1712 -       }, {
1713 -               .alg = "__driver-ecb-cast6-avx",
1714 -               .test = alg_test_null,
1715 -       }, {
1716 -               .alg = "__driver-ecb-serpent-avx",
1717 -               .test = alg_test_null,
1718 -       }, {
1719 -               .alg = "__driver-ecb-serpent-avx2",
1720 -               .test = alg_test_null,
1721 -       }, {
1722 -               .alg = "__driver-ecb-serpent-sse2",
1723 -               .test = alg_test_null,
1724 -       }, {
1725 -               .alg = "__driver-ecb-twofish-avx",
1726 -               .test = alg_test_null,
1727 -       }, {
1728 -               .alg = "__driver-gcm-aes-aesni",
1729 -               .test = alg_test_null,
1730 -               .fips_allowed = 1,
1731 -       }, {
1732 -               .alg = "__ghash-pclmulqdqni",
1733 -               .test = alg_test_null,
1734 -               .fips_allowed = 1,
1735 -       }, {
1736                 .alg = "ansi_cprng",
1737                 .test = alg_test_cprng,
1738                 .suite = {
1739 -                       .cprng = {
1740 -                               .vecs = ansi_cprng_aes_tv_template,
1741 -                               .count = ANSI_CPRNG_AES_TEST_VECTORS
1742 -                       }
1743 +                       .cprng = __VECS(ansi_cprng_aes_tv_template)
1744                 }
1745         }, {
1746                 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1747                 .test = alg_test_aead,
1748                 .suite = {
1749                         .aead = {
1750 -                               .enc = {
1751 -                                       .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1752 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1753 -                               },
1754 -                               .dec = {
1755 -                                       .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1756 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1757 -                               }
1758 +                               .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1759 +                               .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1760                         }
1761                 }
1762         }, {
1763 @@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1764                 .test = alg_test_aead,
1765                 .suite = {
1766                         .aead = {
1767 -                               .enc = {
1768 -                                       .vecs =
1769 -                                       hmac_sha1_aes_cbc_enc_tv_temp,
1770 -                                       .count =
1771 -                                       HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1772 -                               }
1773 +                               .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1774                         }
1775                 }
1776         }, {
1777 @@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1778                 .test = alg_test_aead,
1779                 .suite = {
1780                         .aead = {
1781 -                               .enc = {
1782 -                                       .vecs =
1783 -                                       hmac_sha1_des_cbc_enc_tv_temp,
1784 -                                       .count =
1785 -                                       HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1786 -                               }
1787 +                               .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1788                         }
1789                 }
1790         }, {
1791 @@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1792                 .fips_allowed = 1,
1793                 .suite = {
1794                         .aead = {
1795 -                               .enc = {
1796 -                                       .vecs =
1797 -                                       hmac_sha1_des3_ede_cbc_enc_tv_temp,
1798 -                                       .count =
1799 -                                       HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1800 -                               }
1801 +                               .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1802                         }
1803                 }
1804         }, {
1805 @@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1806                 .test = alg_test_aead,
1807                 .suite = {
1808                         .aead = {
1809 -                               .enc = {
1810 -                                       .vecs =
1811 -                                       hmac_sha1_ecb_cipher_null_enc_tv_temp,
1812 -                                       .count =
1813 -                                       HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1814 -                               },
1815 -                               .dec = {
1816 -                                       .vecs =
1817 -                                       hmac_sha1_ecb_cipher_null_dec_tv_temp,
1818 -                                       .count =
1819 -                                       HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1820 -                               }
1821 +                               .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1822 +                               .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1823                         }
1824                 }
1825         }, {
1826 @@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1827                 .test = alg_test_aead,
1828                 .suite = {
1829                         .aead = {
1830 -                               .enc = {
1831 -                                       .vecs =
1832 -                                       hmac_sha224_des_cbc_enc_tv_temp,
1833 -                                       .count =
1834 -                                       HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1835 -                               }
1836 +                               .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1837                         }
1838                 }
1839         }, {
1840 @@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1841                 .fips_allowed = 1,
1842                 .suite = {
1843                         .aead = {
1844 -                               .enc = {
1845 -                                       .vecs =
1846 -                                       hmac_sha224_des3_ede_cbc_enc_tv_temp,
1847 -                                       .count =
1848 -                                       HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1849 -                               }
1850 +                               .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1851                         }
1852                 }
1853         }, {
1854 @@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1855                 .fips_allowed = 1,
1856                 .suite = {
1857                         .aead = {
1858 -                               .enc = {
1859 -                                       .vecs =
1860 -                                       hmac_sha256_aes_cbc_enc_tv_temp,
1861 -                                       .count =
1862 -                                       HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1863 -                               }
1864 +                               .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1865                         }
1866                 }
1867         }, {
1868 @@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1869                 .test = alg_test_aead,
1870                 .suite = {
1871                         .aead = {
1872 -                               .enc = {
1873 -                                       .vecs =
1874 -                                       hmac_sha256_des_cbc_enc_tv_temp,
1875 -                                       .count =
1876 -                                       HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1877 -                               }
1878 +                               .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1879                         }
1880                 }
1881         }, {
1882 @@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1883                 .fips_allowed = 1,
1884                 .suite = {
1885                         .aead = {
1886 -                               .enc = {
1887 -                                       .vecs =
1888 -                                       hmac_sha256_des3_ede_cbc_enc_tv_temp,
1889 -                                       .count =
1890 -                                       HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1891 -                               }
1892 +                               .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1893                         }
1894                 }
1895         }, {
1896 @@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1897                 .test = alg_test_aead,
1898                 .suite = {
1899                         .aead = {
1900 -                               .enc = {
1901 -                                       .vecs =
1902 -                                       hmac_sha384_des_cbc_enc_tv_temp,
1903 -                                       .count =
1904 -                                       HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1905 -                               }
1906 +                               .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1907                         }
1908                 }
1909         }, {
1910 @@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1911                 .fips_allowed = 1,
1912                 .suite = {
1913                         .aead = {
1914 -                               .enc = {
1915 -                                       .vecs =
1916 -                                       hmac_sha384_des3_ede_cbc_enc_tv_temp,
1917 -                                       .count =
1918 -                                       HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1919 -                               }
1920 +                               .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1921                         }
1922                 }
1923         }, {
1924 @@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1925                 .test = alg_test_aead,
1926                 .suite = {
1927                         .aead = {
1928 -                               .enc = {
1929 -                                       .vecs =
1930 -                                       hmac_sha512_aes_cbc_enc_tv_temp,
1931 -                                       .count =
1932 -                                       HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1933 -                               }
1934 +                               .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1935                         }
1936                 }
1937         }, {
1938 @@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1939                 .test = alg_test_aead,
1940                 .suite = {
1941                         .aead = {
1942 -                               .enc = {
1943 -                                       .vecs =
1944 -                                       hmac_sha512_des_cbc_enc_tv_temp,
1945 -                                       .count =
1946 -                                       HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1947 -                               }
1948 +                               .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1949                         }
1950                 }
1951         }, {
1952 @@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_test_descs[] = {
1953                 .fips_allowed = 1,
1954                 .suite = {
1955                         .aead = {
1956 -                               .enc = {
1957 -                                       .vecs =
1958 -                                       hmac_sha512_des3_ede_cbc_enc_tv_temp,
1959 -                                       .count =
1960 -                                       HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1961 -                               }
1962 +                               .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1963                         }
1964                 }
1965         }, {
1966 @@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1967                 .fips_allowed = 1,
1968                 .suite = {
1969                         .cipher = {
1970 -                               .enc = {
1971 -                                       .vecs = aes_cbc_enc_tv_template,
1972 -                                       .count = AES_CBC_ENC_TEST_VECTORS
1973 -                               },
1974 -                               .dec = {
1975 -                                       .vecs = aes_cbc_dec_tv_template,
1976 -                                       .count = AES_CBC_DEC_TEST_VECTORS
1977 -                               }
1978 +                               .enc = __VECS(aes_cbc_enc_tv_template),
1979 +                               .dec = __VECS(aes_cbc_dec_tv_template)
1980                         }
1981                 }
1982         }, {
1983 @@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_test_descs[] = {
1984                 .test = alg_test_skcipher,
1985                 .suite = {
1986                         .cipher = {
1987 -                               .enc = {
1988 -                                       .vecs = anubis_cbc_enc_tv_template,
1989 -                                       .count = ANUBIS_CBC_ENC_TEST_VECTORS
1990 -                               },
1991 -                               .dec = {
1992 -                                       .vecs = anubis_cbc_dec_tv_template,
1993 -                                       .count = ANUBIS_CBC_DEC_TEST_VECTORS
1994 -                               }
1995 +                               .enc = __VECS(anubis_cbc_enc_tv_template),
1996 +                               .dec = __VECS(anubis_cbc_dec_tv_template)
1997                         }
1998                 }
1999         }, {
2000 @@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2001                 .test = alg_test_skcipher,
2002                 .suite = {
2003                         .cipher = {
2004 -                               .enc = {
2005 -                                       .vecs = bf_cbc_enc_tv_template,
2006 -                                       .count = BF_CBC_ENC_TEST_VECTORS
2007 -                               },
2008 -                               .dec = {
2009 -                                       .vecs = bf_cbc_dec_tv_template,
2010 -                                       .count = BF_CBC_DEC_TEST_VECTORS
2011 -                               }
2012 +                               .enc = __VECS(bf_cbc_enc_tv_template),
2013 +                               .dec = __VECS(bf_cbc_dec_tv_template)
2014                         }
2015                 }
2016         }, {
2017 @@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2018                 .test = alg_test_skcipher,
2019                 .suite = {
2020                         .cipher = {
2021 -                               .enc = {
2022 -                                       .vecs = camellia_cbc_enc_tv_template,
2023 -                                       .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2024 -                               },
2025 -                               .dec = {
2026 -                                       .vecs = camellia_cbc_dec_tv_template,
2027 -                                       .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2028 -                               }
2029 +                               .enc = __VECS(camellia_cbc_enc_tv_template),
2030 +                               .dec = __VECS(camellia_cbc_dec_tv_template)
2031                         }
2032                 }
2033         }, {
2034 @@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2035                 .test = alg_test_skcipher,
2036                 .suite = {
2037                         .cipher = {
2038 -                               .enc = {
2039 -                                       .vecs = cast5_cbc_enc_tv_template,
2040 -                                       .count = CAST5_CBC_ENC_TEST_VECTORS
2041 -                               },
2042 -                               .dec = {
2043 -                                       .vecs = cast5_cbc_dec_tv_template,
2044 -                                       .count = CAST5_CBC_DEC_TEST_VECTORS
2045 -                               }
2046 +                               .enc = __VECS(cast5_cbc_enc_tv_template),
2047 +                               .dec = __VECS(cast5_cbc_dec_tv_template)
2048                         }
2049                 }
2050         }, {
2051 @@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2052                 .test = alg_test_skcipher,
2053                 .suite = {
2054                         .cipher = {
2055 -                               .enc = {
2056 -                                       .vecs = cast6_cbc_enc_tv_template,
2057 -                                       .count = CAST6_CBC_ENC_TEST_VECTORS
2058 -                               },
2059 -                               .dec = {
2060 -                                       .vecs = cast6_cbc_dec_tv_template,
2061 -                                       .count = CAST6_CBC_DEC_TEST_VECTORS
2062 -                               }
2063 +                               .enc = __VECS(cast6_cbc_enc_tv_template),
2064 +                               .dec = __VECS(cast6_cbc_dec_tv_template)
2065                         }
2066                 }
2067         }, {
2068 @@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2069                 .test = alg_test_skcipher,
2070                 .suite = {
2071                         .cipher = {
2072 -                               .enc = {
2073 -                                       .vecs = des_cbc_enc_tv_template,
2074 -                                       .count = DES_CBC_ENC_TEST_VECTORS
2075 -                               },
2076 -                               .dec = {
2077 -                                       .vecs = des_cbc_dec_tv_template,
2078 -                                       .count = DES_CBC_DEC_TEST_VECTORS
2079 -                               }
2080 +                               .enc = __VECS(des_cbc_enc_tv_template),
2081 +                               .dec = __VECS(des_cbc_dec_tv_template)
2082                         }
2083                 }
2084         }, {
2085 @@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2086                 .fips_allowed = 1,
2087                 .suite = {
2088                         .cipher = {
2089 -                               .enc = {
2090 -                                       .vecs = des3_ede_cbc_enc_tv_template,
2091 -                                       .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2092 -                               },
2093 -                               .dec = {
2094 -                                       .vecs = des3_ede_cbc_dec_tv_template,
2095 -                                       .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2096 -                               }
2097 +                               .enc = __VECS(des3_ede_cbc_enc_tv_template),
2098 +                               .dec = __VECS(des3_ede_cbc_dec_tv_template)
2099                         }
2100                 }
2101         }, {
2102 @@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2103                 .test = alg_test_skcipher,
2104                 .suite = {
2105                         .cipher = {
2106 -                               .enc = {
2107 -                                       .vecs = serpent_cbc_enc_tv_template,
2108 -                                       .count = SERPENT_CBC_ENC_TEST_VECTORS
2109 -                               },
2110 -                               .dec = {
2111 -                                       .vecs = serpent_cbc_dec_tv_template,
2112 -                                       .count = SERPENT_CBC_DEC_TEST_VECTORS
2113 -                               }
2114 +                               .enc = __VECS(serpent_cbc_enc_tv_template),
2115 +                               .dec = __VECS(serpent_cbc_dec_tv_template)
2116                         }
2117                 }
2118         }, {
2119 @@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_test_descs[] = {
2120                 .test = alg_test_skcipher,
2121                 .suite = {
2122                         .cipher = {
2123 -                               .enc = {
2124 -                                       .vecs = tf_cbc_enc_tv_template,
2125 -                                       .count = TF_CBC_ENC_TEST_VECTORS
2126 -                               },
2127 -                               .dec = {
2128 -                                       .vecs = tf_cbc_dec_tv_template,
2129 -                                       .count = TF_CBC_DEC_TEST_VECTORS
2130 -                               }
2131 +                               .enc = __VECS(tf_cbc_enc_tv_template),
2132 +                               .dec = __VECS(tf_cbc_dec_tv_template)
2133                         }
2134                 }
2135 +       }, {
2136 +               .alg = "cbcmac(aes)",
2137 +               .fips_allowed = 1,
2138 +               .test = alg_test_hash,
2139 +               .suite = {
2140 +                       .hash = __VECS(aes_cbcmac_tv_template)
2141 +               }
2142         }, {
2143                 .alg = "ccm(aes)",
2144                 .test = alg_test_aead,
2145                 .fips_allowed = 1,
2146                 .suite = {
2147                         .aead = {
2148 -                               .enc = {
2149 -                                       .vecs = aes_ccm_enc_tv_template,
2150 -                                       .count = AES_CCM_ENC_TEST_VECTORS
2151 -                               },
2152 -                               .dec = {
2153 -                                       .vecs = aes_ccm_dec_tv_template,
2154 -                                       .count = AES_CCM_DEC_TEST_VECTORS
2155 -                               }
2156 +                               .enc = __VECS(aes_ccm_enc_tv_template),
2157 +                               .dec = __VECS(aes_ccm_dec_tv_template)
2158                         }
2159                 }
2160         }, {
2161 @@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2162                 .test = alg_test_skcipher,
2163                 .suite = {
2164                         .cipher = {
2165 -                               .enc = {
2166 -                                       .vecs = chacha20_enc_tv_template,
2167 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2168 -                               },
2169 -                               .dec = {
2170 -                                       .vecs = chacha20_enc_tv_template,
2171 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2172 -                               },
2173 +                               .enc = __VECS(chacha20_enc_tv_template),
2174 +                               .dec = __VECS(chacha20_enc_tv_template),
2175                         }
2176                 }
2177         }, {
2178 @@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_test_descs[] = {
2179                 .fips_allowed = 1,
2180                 .test = alg_test_hash,
2181                 .suite = {
2182 -                       .hash = {
2183 -                               .vecs = aes_cmac128_tv_template,
2184 -                               .count = CMAC_AES_TEST_VECTORS
2185 -                       }
2186 +                       .hash = __VECS(aes_cmac128_tv_template)
2187                 }
2188         }, {
2189                 .alg = "cmac(des3_ede)",
2190                 .fips_allowed = 1,
2191                 .test = alg_test_hash,
2192                 .suite = {
2193 -                       .hash = {
2194 -                               .vecs = des3_ede_cmac64_tv_template,
2195 -                               .count = CMAC_DES3_EDE_TEST_VECTORS
2196 -                       }
2197 +                       .hash = __VECS(des3_ede_cmac64_tv_template)
2198                 }
2199         }, {
2200                 .alg = "compress_null",
2201 @@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_test_descs[] = {
2202                 .alg = "crc32",
2203                 .test = alg_test_hash,
2204                 .suite = {
2205 -                       .hash = {
2206 -                               .vecs = crc32_tv_template,
2207 -                               .count = CRC32_TEST_VECTORS
2208 -                       }
2209 +                       .hash = __VECS(crc32_tv_template)
2210                 }
2211         }, {
2212                 .alg = "crc32c",
2213                 .test = alg_test_crc32c,
2214                 .fips_allowed = 1,
2215                 .suite = {
2216 -                       .hash = {
2217 -                               .vecs = crc32c_tv_template,
2218 -                               .count = CRC32C_TEST_VECTORS
2219 -                       }
2220 +                       .hash = __VECS(crc32c_tv_template)
2221                 }
2222         }, {
2223                 .alg = "crct10dif",
2224                 .test = alg_test_hash,
2225                 .fips_allowed = 1,
2226                 .suite = {
2227 -                       .hash = {
2228 -                               .vecs = crct10dif_tv_template,
2229 -                               .count = CRCT10DIF_TEST_VECTORS
2230 -                       }
2231 +                       .hash = __VECS(crct10dif_tv_template)
2232                 }
2233 -       }, {
2234 -               .alg = "cryptd(__driver-cbc-aes-aesni)",
2235 -               .test = alg_test_null,
2236 -               .fips_allowed = 1,
2237 -       }, {
2238 -               .alg = "cryptd(__driver-cbc-camellia-aesni)",
2239 -               .test = alg_test_null,
2240 -       }, {
2241 -               .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2242 -               .test = alg_test_null,
2243 -       }, {
2244 -               .alg = "cryptd(__driver-cbc-serpent-avx2)",
2245 -               .test = alg_test_null,
2246 -       }, {
2247 -               .alg = "cryptd(__driver-ecb-aes-aesni)",
2248 -               .test = alg_test_null,
2249 -               .fips_allowed = 1,
2250 -       }, {
2251 -               .alg = "cryptd(__driver-ecb-camellia-aesni)",
2252 -               .test = alg_test_null,
2253 -       }, {
2254 -               .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2255 -               .test = alg_test_null,
2256 -       }, {
2257 -               .alg = "cryptd(__driver-ecb-cast5-avx)",
2258 -               .test = alg_test_null,
2259 -       }, {
2260 -               .alg = "cryptd(__driver-ecb-cast6-avx)",
2261 -               .test = alg_test_null,
2262 -       }, {
2263 -               .alg = "cryptd(__driver-ecb-serpent-avx)",
2264 -               .test = alg_test_null,
2265 -       }, {
2266 -               .alg = "cryptd(__driver-ecb-serpent-avx2)",
2267 -               .test = alg_test_null,
2268 -       }, {
2269 -               .alg = "cryptd(__driver-ecb-serpent-sse2)",
2270 -               .test = alg_test_null,
2271 -       }, {
2272 -               .alg = "cryptd(__driver-ecb-twofish-avx)",
2273 -               .test = alg_test_null,
2274 -       }, {
2275 -               .alg = "cryptd(__driver-gcm-aes-aesni)",
2276 -               .test = alg_test_null,
2277 -               .fips_allowed = 1,
2278 -       }, {
2279 -               .alg = "cryptd(__ghash-pclmulqdqni)",
2280 -               .test = alg_test_null,
2281 -               .fips_allowed = 1,
2282         }, {
2283                 .alg = "ctr(aes)",
2284                 .test = alg_test_skcipher,
2285                 .fips_allowed = 1,
2286                 .suite = {
2287                         .cipher = {
2288 -                               .enc = {
2289 -                                       .vecs = aes_ctr_enc_tv_template,
2290 -                                       .count = AES_CTR_ENC_TEST_VECTORS
2291 -                               },
2292 -                               .dec = {
2293 -                                       .vecs = aes_ctr_dec_tv_template,
2294 -                                       .count = AES_CTR_DEC_TEST_VECTORS
2295 -                               }
2296 +                               .enc = __VECS(aes_ctr_enc_tv_template),
2297 +                               .dec = __VECS(aes_ctr_dec_tv_template)
2298                         }
2299                 }
2300         }, {
2301 @@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2302                 .test = alg_test_skcipher,
2303                 .suite = {
2304                         .cipher = {
2305 -                               .enc = {
2306 -                                       .vecs = bf_ctr_enc_tv_template,
2307 -                                       .count = BF_CTR_ENC_TEST_VECTORS
2308 -                               },
2309 -                               .dec = {
2310 -                                       .vecs = bf_ctr_dec_tv_template,
2311 -                                       .count = BF_CTR_DEC_TEST_VECTORS
2312 -                               }
2313 +                               .enc = __VECS(bf_ctr_enc_tv_template),
2314 +                               .dec = __VECS(bf_ctr_dec_tv_template)
2315                         }
2316                 }
2317         }, {
2318 @@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2319                 .test = alg_test_skcipher,
2320                 .suite = {
2321                         .cipher = {
2322 -                               .enc = {
2323 -                                       .vecs = camellia_ctr_enc_tv_template,
2324 -                                       .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2325 -                               },
2326 -                               .dec = {
2327 -                                       .vecs = camellia_ctr_dec_tv_template,
2328 -                                       .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2329 -                               }
2330 +                               .enc = __VECS(camellia_ctr_enc_tv_template),
2331 +                               .dec = __VECS(camellia_ctr_dec_tv_template)
2332                         }
2333                 }
2334         }, {
2335 @@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2336                 .test = alg_test_skcipher,
2337                 .suite = {
2338                         .cipher = {
2339 -                               .enc = {
2340 -                                       .vecs = cast5_ctr_enc_tv_template,
2341 -                                       .count = CAST5_CTR_ENC_TEST_VECTORS
2342 -                               },
2343 -                               .dec = {
2344 -                                       .vecs = cast5_ctr_dec_tv_template,
2345 -                                       .count = CAST5_CTR_DEC_TEST_VECTORS
2346 -                               }
2347 +                               .enc = __VECS(cast5_ctr_enc_tv_template),
2348 +                               .dec = __VECS(cast5_ctr_dec_tv_template)
2349                         }
2350                 }
2351         }, {
2352 @@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2353                 .test = alg_test_skcipher,
2354                 .suite = {
2355                         .cipher = {
2356 -                               .enc = {
2357 -                                       .vecs = cast6_ctr_enc_tv_template,
2358 -                                       .count = CAST6_CTR_ENC_TEST_VECTORS
2359 -                               },
2360 -                               .dec = {
2361 -                                       .vecs = cast6_ctr_dec_tv_template,
2362 -                                       .count = CAST6_CTR_DEC_TEST_VECTORS
2363 -                               }
2364 +                               .enc = __VECS(cast6_ctr_enc_tv_template),
2365 +                               .dec = __VECS(cast6_ctr_dec_tv_template)
2366                         }
2367                 }
2368         }, {
2369 @@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_test_descs[] = {
2370                 .test = alg_test_skcipher,
2371                 .suite = {
2372                         .cipher = {
2373 -                               .enc = {
2374 -                                       .vecs = des_ctr_enc_tv_template,
2375 -                                       .count = DES_CTR_ENC_TEST_VECTORS
2376 -                               },
2377 -                               .dec = {
2378 -                                       .vecs = des_ctr_dec_tv_template,
2379 -                                       .count = DES_CTR_DEC_TEST_VECTORS
2380 -                               }
2381 +                               .enc = __VECS(des_ctr_enc_tv_template),
2382 +                               .dec = __VECS(des_ctr_dec_tv_template)
2383                         }
2384                 }
2385         }, {
2386                 .alg = "ctr(des3_ede)",
2387                 .test = alg_test_skcipher,
2388 +               .fips_allowed = 1,
2389                 .suite = {
2390                         .cipher = {
2391 -                               .enc = {
2392 -                                       .vecs = des3_ede_ctr_enc_tv_template,
2393 -                                       .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2394 -                               },
2395 -                               .dec = {
2396 -                                       .vecs = des3_ede_ctr_dec_tv_template,
2397 -                                       .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2398 -                               }
2399 +                               .enc = __VECS(des3_ede_ctr_enc_tv_template),
2400 +                               .dec = __VECS(des3_ede_ctr_dec_tv_template)
2401                         }
2402                 }
2403         }, {
2404 @@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2405                 .test = alg_test_skcipher,
2406                 .suite = {
2407                         .cipher = {
2408 -                               .enc = {
2409 -                                       .vecs = serpent_ctr_enc_tv_template,
2410 -                                       .count = SERPENT_CTR_ENC_TEST_VECTORS
2411 -                               },
2412 -                               .dec = {
2413 -                                       .vecs = serpent_ctr_dec_tv_template,
2414 -                                       .count = SERPENT_CTR_DEC_TEST_VECTORS
2415 -                               }
2416 +                               .enc = __VECS(serpent_ctr_enc_tv_template),
2417 +                               .dec = __VECS(serpent_ctr_dec_tv_template)
2418                         }
2419                 }
2420         }, {
2421 @@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2422                 .test = alg_test_skcipher,
2423                 .suite = {
2424                         .cipher = {
2425 -                               .enc = {
2426 -                                       .vecs = tf_ctr_enc_tv_template,
2427 -                                       .count = TF_CTR_ENC_TEST_VECTORS
2428 -                               },
2429 -                               .dec = {
2430 -                                       .vecs = tf_ctr_dec_tv_template,
2431 -                                       .count = TF_CTR_DEC_TEST_VECTORS
2432 -                               }
2433 +                               .enc = __VECS(tf_ctr_enc_tv_template),
2434 +                               .dec = __VECS(tf_ctr_dec_tv_template)
2435                         }
2436                 }
2437         }, {
2438 @@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2439                 .test = alg_test_skcipher,
2440                 .suite = {
2441                         .cipher = {
2442 -                               .enc = {
2443 -                                       .vecs = cts_mode_enc_tv_template,
2444 -                                       .count = CTS_MODE_ENC_TEST_VECTORS
2445 -                               },
2446 -                               .dec = {
2447 -                                       .vecs = cts_mode_dec_tv_template,
2448 -                                       .count = CTS_MODE_DEC_TEST_VECTORS
2449 -                               }
2450 +                               .enc = __VECS(cts_mode_enc_tv_template),
2451 +                               .dec = __VECS(cts_mode_dec_tv_template)
2452                         }
2453                 }
2454         }, {
2455 @@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2456                 .fips_allowed = 1,
2457                 .suite = {
2458                         .comp = {
2459 -                               .comp = {
2460 -                                       .vecs = deflate_comp_tv_template,
2461 -                                       .count = DEFLATE_COMP_TEST_VECTORS
2462 -                               },
2463 -                               .decomp = {
2464 -                                       .vecs = deflate_decomp_tv_template,
2465 -                                       .count = DEFLATE_DECOMP_TEST_VECTORS
2466 -                               }
2467 +                               .comp = __VECS(deflate_comp_tv_template),
2468 +                               .decomp = __VECS(deflate_decomp_tv_template)
2469                         }
2470                 }
2471         }, {
2472 @@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2473                 .test = alg_test_kpp,
2474                 .fips_allowed = 1,
2475                 .suite = {
2476 -                       .kpp = {
2477 -                               .vecs = dh_tv_template,
2478 -                               .count = DH_TEST_VECTORS
2479 -                       }
2480 +                       .kpp = __VECS(dh_tv_template)
2481                 }
2482         }, {
2483                 .alg = "digest_null",
2484 @@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_test_descs[] = {
2485                 .test = alg_test_drbg,
2486                 .fips_allowed = 1,
2487                 .suite = {
2488 -                       .drbg = {
2489 -                               .vecs = drbg_nopr_ctr_aes128_tv_template,
2490 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2491 -                       }
2492 +                       .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2493                 }
2494         }, {
2495                 .alg = "drbg_nopr_ctr_aes192",
2496                 .test = alg_test_drbg,
2497                 .fips_allowed = 1,
2498                 .suite = {
2499 -                       .drbg = {
2500 -                               .vecs = drbg_nopr_ctr_aes192_tv_template,
2501 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2502 -                       }
2503 +                       .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2504                 }
2505         }, {
2506                 .alg = "drbg_nopr_ctr_aes256",
2507                 .test = alg_test_drbg,
2508                 .fips_allowed = 1,
2509                 .suite = {
2510 -                       .drbg = {
2511 -                               .vecs = drbg_nopr_ctr_aes256_tv_template,
2512 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2513 -                       }
2514 +                       .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2515                 }
2516         }, {
2517                 /*
2518 @@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2519                 .test = alg_test_drbg,
2520                 .fips_allowed = 1,
2521                 .suite = {
2522 -                       .drbg = {
2523 -                               .vecs = drbg_nopr_hmac_sha256_tv_template,
2524 -                               .count =
2525 -                               ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2526 -                       }
2527 +                       .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2528                 }
2529         }, {
2530                 /* covered by drbg_nopr_hmac_sha256 test */
2531 @@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2532                 .test = alg_test_drbg,
2533                 .fips_allowed = 1,
2534                 .suite = {
2535 -                       .drbg = {
2536 -                               .vecs = drbg_nopr_sha256_tv_template,
2537 -                               .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2538 -                       }
2539 +                       .drbg = __VECS(drbg_nopr_sha256_tv_template)
2540                 }
2541         }, {
2542                 /* covered by drbg_nopr_sha256 test */
2543 @@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2544                 .test = alg_test_drbg,
2545                 .fips_allowed = 1,
2546                 .suite = {
2547 -                       .drbg = {
2548 -                               .vecs = drbg_pr_ctr_aes128_tv_template,
2549 -                               .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2550 -                       }
2551 +                       .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2552                 }
2553         }, {
2554                 /* covered by drbg_pr_ctr_aes128 test */
2555 @@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2556                 .test = alg_test_drbg,
2557                 .fips_allowed = 1,
2558                 .suite = {
2559 -                       .drbg = {
2560 -                               .vecs = drbg_pr_hmac_sha256_tv_template,
2561 -                               .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2562 -                       }
2563 +                       .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2564                 }
2565         }, {
2566                 /* covered by drbg_pr_hmac_sha256 test */
2567 @@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2568                 .test = alg_test_drbg,
2569                 .fips_allowed = 1,
2570                 .suite = {
2571 -                       .drbg = {
2572 -                               .vecs = drbg_pr_sha256_tv_template,
2573 -                               .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2574 -                       }
2575 +                       .drbg = __VECS(drbg_pr_sha256_tv_template)
2576                 }
2577         }, {
2578                 /* covered by drbg_pr_sha256 test */
2579 @@ -3033,24 +3055,14 @@ static const struct alg_test_desc alg_test_descs[] = {
2580                 .alg = "drbg_pr_sha512",
2581                 .fips_allowed = 1,
2582                 .test = alg_test_null,
2583 -       }, {
2584 -               .alg = "ecb(__aes-aesni)",
2585 -               .test = alg_test_null,
2586 -               .fips_allowed = 1,
2587         }, {
2588                 .alg = "ecb(aes)",
2589                 .test = alg_test_skcipher,
2590                 .fips_allowed = 1,
2591                 .suite = {
2592                         .cipher = {
2593 -                               .enc = {
2594 -                                       .vecs = aes_enc_tv_template,
2595 -                                       .count = AES_ENC_TEST_VECTORS
2596 -                               },
2597 -                               .dec = {
2598 -                                       .vecs = aes_dec_tv_template,
2599 -                                       .count = AES_DEC_TEST_VECTORS
2600 -                               }
2601 +                               .enc = __VECS(aes_enc_tv_template),
2602 +                               .dec = __VECS(aes_dec_tv_template)
2603                         }
2604                 }
2605         }, {
2606 @@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2607                 .test = alg_test_skcipher,
2608                 .suite = {
2609                         .cipher = {
2610 -                               .enc = {
2611 -                                       .vecs = anubis_enc_tv_template,
2612 -                                       .count = ANUBIS_ENC_TEST_VECTORS
2613 -                               },
2614 -                               .dec = {
2615 -                                       .vecs = anubis_dec_tv_template,
2616 -                                       .count = ANUBIS_DEC_TEST_VECTORS
2617 -                               }
2618 +                               .enc = __VECS(anubis_enc_tv_template),
2619 +                               .dec = __VECS(anubis_dec_tv_template)
2620                         }
2621                 }
2622         }, {
2623 @@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2624                 .test = alg_test_skcipher,
2625                 .suite = {
2626                         .cipher = {
2627 -                               .enc = {
2628 -                                       .vecs = arc4_enc_tv_template,
2629 -                                       .count = ARC4_ENC_TEST_VECTORS
2630 -                               },
2631 -                               .dec = {
2632 -                                       .vecs = arc4_dec_tv_template,
2633 -                                       .count = ARC4_DEC_TEST_VECTORS
2634 -                               }
2635 +                               .enc = __VECS(arc4_enc_tv_template),
2636 +                               .dec = __VECS(arc4_dec_tv_template)
2637                         }
2638                 }
2639         }, {
2640 @@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2641                 .test = alg_test_skcipher,
2642                 .suite = {
2643                         .cipher = {
2644 -                               .enc = {
2645 -                                       .vecs = bf_enc_tv_template,
2646 -                                       .count = BF_ENC_TEST_VECTORS
2647 -                               },
2648 -                               .dec = {
2649 -                                       .vecs = bf_dec_tv_template,
2650 -                                       .count = BF_DEC_TEST_VECTORS
2651 -                               }
2652 +                               .enc = __VECS(bf_enc_tv_template),
2653 +                               .dec = __VECS(bf_dec_tv_template)
2654                         }
2655                 }
2656         }, {
2657 @@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2658                 .test = alg_test_skcipher,
2659                 .suite = {
2660                         .cipher = {
2661 -                               .enc = {
2662 -                                       .vecs = camellia_enc_tv_template,
2663 -                                       .count = CAMELLIA_ENC_TEST_VECTORS
2664 -                               },
2665 -                               .dec = {
2666 -                                       .vecs = camellia_dec_tv_template,
2667 -                                       .count = CAMELLIA_DEC_TEST_VECTORS
2668 -                               }
2669 +                               .enc = __VECS(camellia_enc_tv_template),
2670 +                               .dec = __VECS(camellia_dec_tv_template)
2671                         }
2672                 }
2673         }, {
2674 @@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2675                 .test = alg_test_skcipher,
2676                 .suite = {
2677                         .cipher = {
2678 -                               .enc = {
2679 -                                       .vecs = cast5_enc_tv_template,
2680 -                                       .count = CAST5_ENC_TEST_VECTORS
2681 -                               },
2682 -                               .dec = {
2683 -                                       .vecs = cast5_dec_tv_template,
2684 -                                       .count = CAST5_DEC_TEST_VECTORS
2685 -                               }
2686 +                               .enc = __VECS(cast5_enc_tv_template),
2687 +                               .dec = __VECS(cast5_dec_tv_template)
2688                         }
2689                 }
2690         }, {
2691 @@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2692                 .test = alg_test_skcipher,
2693                 .suite = {
2694                         .cipher = {
2695 -                               .enc = {
2696 -                                       .vecs = cast6_enc_tv_template,
2697 -                                       .count = CAST6_ENC_TEST_VECTORS
2698 -                               },
2699 -                               .dec = {
2700 -                                       .vecs = cast6_dec_tv_template,
2701 -                                       .count = CAST6_DEC_TEST_VECTORS
2702 -                               }
2703 +                               .enc = __VECS(cast6_enc_tv_template),
2704 +                               .dec = __VECS(cast6_dec_tv_template)
2705                         }
2706                 }
2707         }, {
2708 @@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2709                 .test = alg_test_skcipher,
2710                 .suite = {
2711                         .cipher = {
2712 -                               .enc = {
2713 -                                       .vecs = des_enc_tv_template,
2714 -                                       .count = DES_ENC_TEST_VECTORS
2715 -                               },
2716 -                               .dec = {
2717 -                                       .vecs = des_dec_tv_template,
2718 -                                       .count = DES_DEC_TEST_VECTORS
2719 -                               }
2720 +                               .enc = __VECS(des_enc_tv_template),
2721 +                               .dec = __VECS(des_dec_tv_template)
2722                         }
2723                 }
2724         }, {
2725 @@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2726                 .fips_allowed = 1,
2727                 .suite = {
2728                         .cipher = {
2729 -                               .enc = {
2730 -                                       .vecs = des3_ede_enc_tv_template,
2731 -                                       .count = DES3_EDE_ENC_TEST_VECTORS
2732 -                               },
2733 -                               .dec = {
2734 -                                       .vecs = des3_ede_dec_tv_template,
2735 -                                       .count = DES3_EDE_DEC_TEST_VECTORS
2736 -                               }
2737 +                               .enc = __VECS(des3_ede_enc_tv_template),
2738 +                               .dec = __VECS(des3_ede_dec_tv_template)
2739                         }
2740                 }
2741         }, {
2742 @@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2743                 .test = alg_test_skcipher,
2744                 .suite = {
2745                         .cipher = {
2746 -                               .enc = {
2747 -                                       .vecs = khazad_enc_tv_template,
2748 -                                       .count = KHAZAD_ENC_TEST_VECTORS
2749 -                               },
2750 -                               .dec = {
2751 -                                       .vecs = khazad_dec_tv_template,
2752 -                                       .count = KHAZAD_DEC_TEST_VECTORS
2753 -                               }
2754 +                               .enc = __VECS(khazad_enc_tv_template),
2755 +                               .dec = __VECS(khazad_dec_tv_template)
2756                         }
2757                 }
2758         }, {
2759 @@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2760                 .test = alg_test_skcipher,
2761                 .suite = {
2762                         .cipher = {
2763 -                               .enc = {
2764 -                                       .vecs = seed_enc_tv_template,
2765 -                                       .count = SEED_ENC_TEST_VECTORS
2766 -                               },
2767 -                               .dec = {
2768 -                                       .vecs = seed_dec_tv_template,
2769 -                                       .count = SEED_DEC_TEST_VECTORS
2770 -                               }
2771 +                               .enc = __VECS(seed_enc_tv_template),
2772 +                               .dec = __VECS(seed_dec_tv_template)
2773                         }
2774                 }
2775         }, {
2776 @@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2777                 .test = alg_test_skcipher,
2778                 .suite = {
2779                         .cipher = {
2780 -                               .enc = {
2781 -                                       .vecs = serpent_enc_tv_template,
2782 -                                       .count = SERPENT_ENC_TEST_VECTORS
2783 -                               },
2784 -                               .dec = {
2785 -                                       .vecs = serpent_dec_tv_template,
2786 -                                       .count = SERPENT_DEC_TEST_VECTORS
2787 -                               }
2788 +                               .enc = __VECS(serpent_enc_tv_template),
2789 +                               .dec = __VECS(serpent_dec_tv_template)
2790                         }
2791                 }
2792         }, {
2793 @@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2794                 .test = alg_test_skcipher,
2795                 .suite = {
2796                         .cipher = {
2797 -                               .enc = {
2798 -                                       .vecs = tea_enc_tv_template,
2799 -                                       .count = TEA_ENC_TEST_VECTORS
2800 -                               },
2801 -                               .dec = {
2802 -                                       .vecs = tea_dec_tv_template,
2803 -                                       .count = TEA_DEC_TEST_VECTORS
2804 -                               }
2805 +                               .enc = __VECS(tea_enc_tv_template),
2806 +                               .dec = __VECS(tea_dec_tv_template)
2807                         }
2808                 }
2809         }, {
2810 @@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2811                 .test = alg_test_skcipher,
2812                 .suite = {
2813                         .cipher = {
2814 -                               .enc = {
2815 -                                       .vecs = tnepres_enc_tv_template,
2816 -                                       .count = TNEPRES_ENC_TEST_VECTORS
2817 -                               },
2818 -                               .dec = {
2819 -                                       .vecs = tnepres_dec_tv_template,
2820 -                                       .count = TNEPRES_DEC_TEST_VECTORS
2821 -                               }
2822 +                               .enc = __VECS(tnepres_enc_tv_template),
2823 +                               .dec = __VECS(tnepres_dec_tv_template)
2824                         }
2825                 }
2826         }, {
2827 @@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2828                 .test = alg_test_skcipher,
2829                 .suite = {
2830                         .cipher = {
2831 -                               .enc = {
2832 -                                       .vecs = tf_enc_tv_template,
2833 -                                       .count = TF_ENC_TEST_VECTORS
2834 -                               },
2835 -                               .dec = {
2836 -                                       .vecs = tf_dec_tv_template,
2837 -                                       .count = TF_DEC_TEST_VECTORS
2838 -                               }
2839 +                               .enc = __VECS(tf_enc_tv_template),
2840 +                               .dec = __VECS(tf_dec_tv_template)
2841                         }
2842                 }
2843         }, {
2844 @@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2845                 .test = alg_test_skcipher,
2846                 .suite = {
2847                         .cipher = {
2848 -                               .enc = {
2849 -                                       .vecs = xeta_enc_tv_template,
2850 -                                       .count = XETA_ENC_TEST_VECTORS
2851 -                               },
2852 -                               .dec = {
2853 -                                       .vecs = xeta_dec_tv_template,
2854 -                                       .count = XETA_DEC_TEST_VECTORS
2855 -                               }
2856 +                               .enc = __VECS(xeta_enc_tv_template),
2857 +                               .dec = __VECS(xeta_dec_tv_template)
2858                         }
2859                 }
2860         }, {
2861 @@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2862                 .test = alg_test_skcipher,
2863                 .suite = {
2864                         .cipher = {
2865 -                               .enc = {
2866 -                                       .vecs = xtea_enc_tv_template,
2867 -                                       .count = XTEA_ENC_TEST_VECTORS
2868 -                               },
2869 -                               .dec = {
2870 -                                       .vecs = xtea_dec_tv_template,
2871 -                                       .count = XTEA_DEC_TEST_VECTORS
2872 -                               }
2873 +                               .enc = __VECS(xtea_enc_tv_template),
2874 +                               .dec = __VECS(xtea_dec_tv_template)
2875                         }
2876                 }
2877         }, {
2878 @@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2879                 .test = alg_test_kpp,
2880                 .fips_allowed = 1,
2881                 .suite = {
2882 -                       .kpp = {
2883 -                               .vecs = ecdh_tv_template,
2884 -                               .count = ECDH_TEST_VECTORS
2885 -                       }
2886 +                       .kpp = __VECS(ecdh_tv_template)
2887                 }
2888         }, {
2889                 .alg = "gcm(aes)",
2890 @@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_test_descs[] = {
2891                 .fips_allowed = 1,
2892                 .suite = {
2893                         .aead = {
2894 -                               .enc = {
2895 -                                       .vecs = aes_gcm_enc_tv_template,
2896 -                                       .count = AES_GCM_ENC_TEST_VECTORS
2897 -                               },
2898 -                               .dec = {
2899 -                                       .vecs = aes_gcm_dec_tv_template,
2900 -                                       .count = AES_GCM_DEC_TEST_VECTORS
2901 -                               }
2902 +                               .enc = __VECS(aes_gcm_enc_tv_template),
2903 +                               .dec = __VECS(aes_gcm_dec_tv_template)
2904                         }
2905                 }
2906         }, {
2907 @@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_test_descs[] = {
2908                 .test = alg_test_hash,
2909                 .fips_allowed = 1,
2910                 .suite = {
2911 -                       .hash = {
2912 -                               .vecs = ghash_tv_template,
2913 -                               .count = GHASH_TEST_VECTORS
2914 -                       }
2915 +                       .hash = __VECS(ghash_tv_template)
2916                 }
2917         }, {
2918                 .alg = "hmac(crc32)",
2919                 .test = alg_test_hash,
2920                 .suite = {
2921 -                       .hash = {
2922 -                               .vecs = bfin_crc_tv_template,
2923 -                               .count = BFIN_CRC_TEST_VECTORS
2924 -                       }
2925 +                       .hash = __VECS(bfin_crc_tv_template)
2926                 }
2927         }, {
2928                 .alg = "hmac(md5)",
2929                 .test = alg_test_hash,
2930                 .suite = {
2931 -                       .hash = {
2932 -                               .vecs = hmac_md5_tv_template,
2933 -                               .count = HMAC_MD5_TEST_VECTORS
2934 -                       }
2935 +                       .hash = __VECS(hmac_md5_tv_template)
2936                 }
2937         }, {
2938                 .alg = "hmac(rmd128)",
2939                 .test = alg_test_hash,
2940                 .suite = {
2941 -                       .hash = {
2942 -                               .vecs = hmac_rmd128_tv_template,
2943 -                               .count = HMAC_RMD128_TEST_VECTORS
2944 -                       }
2945 +                       .hash = __VECS(hmac_rmd128_tv_template)
2946                 }
2947         }, {
2948                 .alg = "hmac(rmd160)",
2949                 .test = alg_test_hash,
2950                 .suite = {
2951 -                       .hash = {
2952 -                               .vecs = hmac_rmd160_tv_template,
2953 -                               .count = HMAC_RMD160_TEST_VECTORS
2954 -                       }
2955 +                       .hash = __VECS(hmac_rmd160_tv_template)
2956                 }
2957         }, {
2958                 .alg = "hmac(sha1)",
2959                 .test = alg_test_hash,
2960                 .fips_allowed = 1,
2961                 .suite = {
2962 -                       .hash = {
2963 -                               .vecs = hmac_sha1_tv_template,
2964 -                               .count = HMAC_SHA1_TEST_VECTORS
2965 -                       }
2966 +                       .hash = __VECS(hmac_sha1_tv_template)
2967                 }
2968         }, {
2969                 .alg = "hmac(sha224)",
2970                 .test = alg_test_hash,
2971                 .fips_allowed = 1,
2972                 .suite = {
2973 -                       .hash = {
2974 -                               .vecs = hmac_sha224_tv_template,
2975 -                               .count = HMAC_SHA224_TEST_VECTORS
2976 -                       }
2977 +                       .hash = __VECS(hmac_sha224_tv_template)
2978                 }
2979         }, {
2980                 .alg = "hmac(sha256)",
2981                 .test = alg_test_hash,
2982                 .fips_allowed = 1,
2983                 .suite = {
2984 -                       .hash = {
2985 -                               .vecs = hmac_sha256_tv_template,
2986 -                               .count = HMAC_SHA256_TEST_VECTORS
2987 -                       }
2988 +                       .hash = __VECS(hmac_sha256_tv_template)
2989                 }
2990         }, {
2991                 .alg = "hmac(sha3-224)",
2992                 .test = alg_test_hash,
2993                 .fips_allowed = 1,
2994                 .suite = {
2995 -                       .hash = {
2996 -                               .vecs = hmac_sha3_224_tv_template,
2997 -                               .count = HMAC_SHA3_224_TEST_VECTORS
2998 -                       }
2999 +                       .hash = __VECS(hmac_sha3_224_tv_template)
3000                 }
3001         }, {
3002                 .alg = "hmac(sha3-256)",
3003                 .test = alg_test_hash,
3004                 .fips_allowed = 1,
3005                 .suite = {
3006 -                       .hash = {
3007 -                               .vecs = hmac_sha3_256_tv_template,
3008 -                               .count = HMAC_SHA3_256_TEST_VECTORS
3009 -                       }
3010 +                       .hash = __VECS(hmac_sha3_256_tv_template)
3011                 }
3012         }, {
3013                 .alg = "hmac(sha3-384)",
3014                 .test = alg_test_hash,
3015                 .fips_allowed = 1,
3016                 .suite = {
3017 -                       .hash = {
3018 -                               .vecs = hmac_sha3_384_tv_template,
3019 -                               .count = HMAC_SHA3_384_TEST_VECTORS
3020 -                       }
3021 +                       .hash = __VECS(hmac_sha3_384_tv_template)
3022                 }
3023         }, {
3024                 .alg = "hmac(sha3-512)",
3025                 .test = alg_test_hash,
3026                 .fips_allowed = 1,
3027                 .suite = {
3028 -                       .hash = {
3029 -                               .vecs = hmac_sha3_512_tv_template,
3030 -                               .count = HMAC_SHA3_512_TEST_VECTORS
3031 -                       }
3032 +                       .hash = __VECS(hmac_sha3_512_tv_template)
3033                 }
3034         }, {
3035                 .alg = "hmac(sha384)",
3036                 .test = alg_test_hash,
3037                 .fips_allowed = 1,
3038                 .suite = {
3039 -                       .hash = {
3040 -                               .vecs = hmac_sha384_tv_template,
3041 -                               .count = HMAC_SHA384_TEST_VECTORS
3042 -                       }
3043 +                       .hash = __VECS(hmac_sha384_tv_template)
3044                 }
3045         }, {
3046                 .alg = "hmac(sha512)",
3047                 .test = alg_test_hash,
3048                 .fips_allowed = 1,
3049                 .suite = {
3050 -                       .hash = {
3051 -                               .vecs = hmac_sha512_tv_template,
3052 -                               .count = HMAC_SHA512_TEST_VECTORS
3053 -                       }
3054 +                       .hash = __VECS(hmac_sha512_tv_template)
3055                 }
3056         }, {
3057                 .alg = "jitterentropy_rng",
3058 @@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3059                 .fips_allowed = 1,
3060                 .suite = {
3061                         .cipher = {
3062 -                               .enc = {
3063 -                                       .vecs = aes_kw_enc_tv_template,
3064 -                                       .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3065 -                               },
3066 -                               .dec = {
3067 -                                       .vecs = aes_kw_dec_tv_template,
3068 -                                       .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3069 -                               }
3070 +                               .enc = __VECS(aes_kw_enc_tv_template),
3071 +                               .dec = __VECS(aes_kw_dec_tv_template)
3072                         }
3073                 }
3074         }, {
3075 @@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3076                 .test = alg_test_skcipher,
3077                 .suite = {
3078                         .cipher = {
3079 -                               .enc = {
3080 -                                       .vecs = aes_lrw_enc_tv_template,
3081 -                                       .count = AES_LRW_ENC_TEST_VECTORS
3082 -                               },
3083 -                               .dec = {
3084 -                                       .vecs = aes_lrw_dec_tv_template,
3085 -                                       .count = AES_LRW_DEC_TEST_VECTORS
3086 -                               }
3087 +                               .enc = __VECS(aes_lrw_enc_tv_template),
3088 +                               .dec = __VECS(aes_lrw_dec_tv_template)
3089                         }
3090                 }
3091         }, {
3092 @@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3093                 .test = alg_test_skcipher,
3094                 .suite = {
3095                         .cipher = {
3096 -                               .enc = {
3097 -                                       .vecs = camellia_lrw_enc_tv_template,
3098 -                                       .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3099 -                               },
3100 -                               .dec = {
3101 -                                       .vecs = camellia_lrw_dec_tv_template,
3102 -                                       .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3103 -                               }
3104 +                               .enc = __VECS(camellia_lrw_enc_tv_template),
3105 +                               .dec = __VECS(camellia_lrw_dec_tv_template)
3106                         }
3107                 }
3108         }, {
3109 @@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3110                 .test = alg_test_skcipher,
3111                 .suite = {
3112                         .cipher = {
3113 -                               .enc = {
3114 -                                       .vecs = cast6_lrw_enc_tv_template,
3115 -                                       .count = CAST6_LRW_ENC_TEST_VECTORS
3116 -                               },
3117 -                               .dec = {
3118 -                                       .vecs = cast6_lrw_dec_tv_template,
3119 -                                       .count = CAST6_LRW_DEC_TEST_VECTORS
3120 -                               }
3121 +                               .enc = __VECS(cast6_lrw_enc_tv_template),
3122 +                               .dec = __VECS(cast6_lrw_dec_tv_template)
3123                         }
3124                 }
3125         }, {
3126 @@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3127                 .test = alg_test_skcipher,
3128                 .suite = {
3129                         .cipher = {
3130 -                               .enc = {
3131 -                                       .vecs = serpent_lrw_enc_tv_template,
3132 -                                       .count = SERPENT_LRW_ENC_TEST_VECTORS
3133 -                               },
3134 -                               .dec = {
3135 -                                       .vecs = serpent_lrw_dec_tv_template,
3136 -                                       .count = SERPENT_LRW_DEC_TEST_VECTORS
3137 -                               }
3138 +                               .enc = __VECS(serpent_lrw_enc_tv_template),
3139 +                               .dec = __VECS(serpent_lrw_dec_tv_template)
3140                         }
3141                 }
3142         }, {
3143 @@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3144                 .test = alg_test_skcipher,
3145                 .suite = {
3146                         .cipher = {
3147 -                               .enc = {
3148 -                                       .vecs = tf_lrw_enc_tv_template,
3149 -                                       .count = TF_LRW_ENC_TEST_VECTORS
3150 -                               },
3151 -                               .dec = {
3152 -                                       .vecs = tf_lrw_dec_tv_template,
3153 -                                       .count = TF_LRW_DEC_TEST_VECTORS
3154 -                               }
3155 +                               .enc = __VECS(tf_lrw_enc_tv_template),
3156 +                               .dec = __VECS(tf_lrw_dec_tv_template)
3157                         }
3158                 }
3159         }, {
3160 @@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3161                 .fips_allowed = 1,
3162                 .suite = {
3163                         .comp = {
3164 -                               .comp = {
3165 -                                       .vecs = lz4_comp_tv_template,
3166 -                                       .count = LZ4_COMP_TEST_VECTORS
3167 -                               },
3168 -                               .decomp = {
3169 -                                       .vecs = lz4_decomp_tv_template,
3170 -                                       .count = LZ4_DECOMP_TEST_VECTORS
3171 -                               }
3172 +                               .comp = __VECS(lz4_comp_tv_template),
3173 +                               .decomp = __VECS(lz4_decomp_tv_template)
3174                         }
3175                 }
3176         }, {
3177 @@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3178                 .fips_allowed = 1,
3179                 .suite = {
3180                         .comp = {
3181 -                               .comp = {
3182 -                                       .vecs = lz4hc_comp_tv_template,
3183 -                                       .count = LZ4HC_COMP_TEST_VECTORS
3184 -                               },
3185 -                               .decomp = {
3186 -                                       .vecs = lz4hc_decomp_tv_template,
3187 -                                       .count = LZ4HC_DECOMP_TEST_VECTORS
3188 -                               }
3189 +                               .comp = __VECS(lz4hc_comp_tv_template),
3190 +                               .decomp = __VECS(lz4hc_decomp_tv_template)
3191                         }
3192                 }
3193         }, {
3194 @@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_test_descs[] = {
3195                 .fips_allowed = 1,
3196                 .suite = {
3197                         .comp = {
3198 -                               .comp = {
3199 -                                       .vecs = lzo_comp_tv_template,
3200 -                                       .count = LZO_COMP_TEST_VECTORS
3201 -                               },
3202 -                               .decomp = {
3203 -                                       .vecs = lzo_decomp_tv_template,
3204 -                                       .count = LZO_DECOMP_TEST_VECTORS
3205 -                               }
3206 +                               .comp = __VECS(lzo_comp_tv_template),
3207 +                               .decomp = __VECS(lzo_decomp_tv_template)
3208                         }
3209                 }
3210         }, {
3211                 .alg = "md4",
3212                 .test = alg_test_hash,
3213                 .suite = {
3214 -                       .hash = {
3215 -                               .vecs = md4_tv_template,
3216 -                               .count = MD4_TEST_VECTORS
3217 -                       }
3218 +                       .hash = __VECS(md4_tv_template)
3219                 }
3220         }, {
3221                 .alg = "md5",
3222                 .test = alg_test_hash,
3223                 .suite = {
3224 -                       .hash = {
3225 -                               .vecs = md5_tv_template,
3226 -                               .count = MD5_TEST_VECTORS
3227 -                       }
3228 +                       .hash = __VECS(md5_tv_template)
3229                 }
3230         }, {
3231                 .alg = "michael_mic",
3232                 .test = alg_test_hash,
3233                 .suite = {
3234 -                       .hash = {
3235 -                               .vecs = michael_mic_tv_template,
3236 -                               .count = MICHAEL_MIC_TEST_VECTORS
3237 -                       }
3238 +                       .hash = __VECS(michael_mic_tv_template)
3239                 }
3240         }, {
3241                 .alg = "ofb(aes)",
3242 @@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3243                 .fips_allowed = 1,
3244                 .suite = {
3245                         .cipher = {
3246 -                               .enc = {
3247 -                                       .vecs = aes_ofb_enc_tv_template,
3248 -                                       .count = AES_OFB_ENC_TEST_VECTORS
3249 -                               },
3250 -                               .dec = {
3251 -                                       .vecs = aes_ofb_dec_tv_template,
3252 -                                       .count = AES_OFB_DEC_TEST_VECTORS
3253 -                               }
3254 +                               .enc = __VECS(aes_ofb_enc_tv_template),
3255 +                               .dec = __VECS(aes_ofb_dec_tv_template)
3256                         }
3257                 }
3258         }, {
3259 @@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_test_descs[] = {
3260                 .test = alg_test_skcipher,
3261                 .suite = {
3262                         .cipher = {
3263 -                               .enc = {
3264 -                                       .vecs = fcrypt_pcbc_enc_tv_template,
3265 -                                       .count = FCRYPT_ENC_TEST_VECTORS
3266 -                               },
3267 -                               .dec = {
3268 -                                       .vecs = fcrypt_pcbc_dec_tv_template,
3269 -                                       .count = FCRYPT_DEC_TEST_VECTORS
3270 -                               }
3271 +                               .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3272 +                               .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3273                         }
3274                 }
3275         }, {
3276                 .alg = "poly1305",
3277                 .test = alg_test_hash,
3278                 .suite = {
3279 -                       .hash = {
3280 -                               .vecs = poly1305_tv_template,
3281 -                               .count = POLY1305_TEST_VECTORS
3282 -                       }
3283 +                       .hash = __VECS(poly1305_tv_template)
3284                 }
3285         }, {
3286                 .alg = "rfc3686(ctr(aes))",
3287 @@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3288                 .fips_allowed = 1,
3289                 .suite = {
3290                         .cipher = {
3291 -                               .enc = {
3292 -                                       .vecs = aes_ctr_rfc3686_enc_tv_template,
3293 -                                       .count = AES_CTR_3686_ENC_TEST_VECTORS
3294 -                               },
3295 -                               .dec = {
3296 -                                       .vecs = aes_ctr_rfc3686_dec_tv_template,
3297 -                                       .count = AES_CTR_3686_DEC_TEST_VECTORS
3298 -                               }
3299 +                               .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3300 +                               .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3301                         }
3302                 }
3303         }, {
3304 @@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3305                 .fips_allowed = 1,
3306                 .suite = {
3307                         .aead = {
3308 -                               .enc = {
3309 -                                       .vecs = aes_gcm_rfc4106_enc_tv_template,
3310 -                                       .count = AES_GCM_4106_ENC_TEST_VECTORS
3311 -                               },
3312 -                               .dec = {
3313 -                                       .vecs = aes_gcm_rfc4106_dec_tv_template,
3314 -                                       .count = AES_GCM_4106_DEC_TEST_VECTORS
3315 -                               }
3316 +                               .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3317 +                               .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3318                         }
3319                 }
3320         }, {
3321 @@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3322                 .fips_allowed = 1,
3323                 .suite = {
3324                         .aead = {
3325 -                               .enc = {
3326 -                                       .vecs = aes_ccm_rfc4309_enc_tv_template,
3327 -                                       .count = AES_CCM_4309_ENC_TEST_VECTORS
3328 -                               },
3329 -                               .dec = {
3330 -                                       .vecs = aes_ccm_rfc4309_dec_tv_template,
3331 -                                       .count = AES_CCM_4309_DEC_TEST_VECTORS
3332 -                               }
3333 +                               .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3334 +                               .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3335                         }
3336                 }
3337         }, {
3338 @@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3339                 .test = alg_test_aead,
3340                 .suite = {
3341                         .aead = {
3342 -                               .enc = {
3343 -                                       .vecs = aes_gcm_rfc4543_enc_tv_template,
3344 -                                       .count = AES_GCM_4543_ENC_TEST_VECTORS
3345 -                               },
3346 -                               .dec = {
3347 -                                       .vecs = aes_gcm_rfc4543_dec_tv_template,
3348 -                                       .count = AES_GCM_4543_DEC_TEST_VECTORS
3349 -                               },
3350 +                               .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3351 +                               .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3352                         }
3353                 }
3354         }, {
3355 @@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3356                 .test = alg_test_aead,
3357                 .suite = {
3358                         .aead = {
3359 -                               .enc = {
3360 -                                       .vecs = rfc7539_enc_tv_template,
3361 -                                       .count = RFC7539_ENC_TEST_VECTORS
3362 -                               },
3363 -                               .dec = {
3364 -                                       .vecs = rfc7539_dec_tv_template,
3365 -                                       .count = RFC7539_DEC_TEST_VECTORS
3366 -                               },
3367 +                               .enc = __VECS(rfc7539_enc_tv_template),
3368 +                               .dec = __VECS(rfc7539_dec_tv_template),
3369                         }
3370                 }
3371         }, {
3372 @@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_test_descs[] = {
3373                 .test = alg_test_aead,
3374                 .suite = {
3375                         .aead = {
3376 -                               .enc = {
3377 -                                       .vecs = rfc7539esp_enc_tv_template,
3378 -                                       .count = RFC7539ESP_ENC_TEST_VECTORS
3379 -                               },
3380 -                               .dec = {
3381 -                                       .vecs = rfc7539esp_dec_tv_template,
3382 -                                       .count = RFC7539ESP_DEC_TEST_VECTORS
3383 -                               },
3384 +                               .enc = __VECS(rfc7539esp_enc_tv_template),
3385 +                               .dec = __VECS(rfc7539esp_dec_tv_template),
3386                         }
3387                 }
3388         }, {
3389                 .alg = "rmd128",
3390                 .test = alg_test_hash,
3391                 .suite = {
3392 -                       .hash = {
3393 -                               .vecs = rmd128_tv_template,
3394 -                               .count = RMD128_TEST_VECTORS
3395 -                       }
3396 +                       .hash = __VECS(rmd128_tv_template)
3397                 }
3398         }, {
3399                 .alg = "rmd160",
3400                 .test = alg_test_hash,
3401                 .suite = {
3402 -                       .hash = {
3403 -                               .vecs = rmd160_tv_template,
3404 -                               .count = RMD160_TEST_VECTORS
3405 -                       }
3406 +                       .hash = __VECS(rmd160_tv_template)
3407                 }
3408         }, {
3409                 .alg = "rmd256",
3410                 .test = alg_test_hash,
3411                 .suite = {
3412 -                       .hash = {
3413 -                               .vecs = rmd256_tv_template,
3414 -                               .count = RMD256_TEST_VECTORS
3415 -                       }
3416 +                       .hash = __VECS(rmd256_tv_template)
3417                 }
3418         }, {
3419                 .alg = "rmd320",
3420                 .test = alg_test_hash,
3421                 .suite = {
3422 -                       .hash = {
3423 -                               .vecs = rmd320_tv_template,
3424 -                               .count = RMD320_TEST_VECTORS
3425 -                       }
3426 +                       .hash = __VECS(rmd320_tv_template)
3427                 }
3428         }, {
3429                 .alg = "rsa",
3430                 .test = alg_test_akcipher,
3431                 .fips_allowed = 1,
3432                 .suite = {
3433 -                       .akcipher = {
3434 -                               .vecs = rsa_tv_template,
3435 -                               .count = RSA_TEST_VECTORS
3436 -                       }
3437 +                       .akcipher = __VECS(rsa_tv_template)
3438                 }
3439         }, {
3440                 .alg = "salsa20",
3441                 .test = alg_test_skcipher,
3442                 .suite = {
3443                         .cipher = {
3444 -                               .enc = {
3445 -                                       .vecs = salsa20_stream_enc_tv_template,
3446 -                                       .count = SALSA20_STREAM_ENC_TEST_VECTORS
3447 -                               }
3448 +                               .enc = __VECS(salsa20_stream_enc_tv_template)
3449                         }
3450                 }
3451         }, {
3452 @@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_test_descs[] = {
3453                 .test = alg_test_hash,
3454                 .fips_allowed = 1,
3455                 .suite = {
3456 -                       .hash = {
3457 -                               .vecs = sha1_tv_template,
3458 -                               .count = SHA1_TEST_VECTORS
3459 -                       }
3460 +                       .hash = __VECS(sha1_tv_template)
3461                 }
3462         }, {
3463                 .alg = "sha224",
3464                 .test = alg_test_hash,
3465                 .fips_allowed = 1,
3466                 .suite = {
3467 -                       .hash = {
3468 -                               .vecs = sha224_tv_template,
3469 -                               .count = SHA224_TEST_VECTORS
3470 -                       }
3471 +                       .hash = __VECS(sha224_tv_template)
3472                 }
3473         }, {
3474                 .alg = "sha256",
3475                 .test = alg_test_hash,
3476                 .fips_allowed = 1,
3477                 .suite = {
3478 -                       .hash = {
3479 -                               .vecs = sha256_tv_template,
3480 -                               .count = SHA256_TEST_VECTORS
3481 -                       }
3482 +                       .hash = __VECS(sha256_tv_template)
3483                 }
3484         }, {
3485                 .alg = "sha3-224",
3486                 .test = alg_test_hash,
3487                 .fips_allowed = 1,
3488                 .suite = {
3489 -                       .hash = {
3490 -                               .vecs = sha3_224_tv_template,
3491 -                               .count = SHA3_224_TEST_VECTORS
3492 -                       }
3493 +                       .hash = __VECS(sha3_224_tv_template)
3494                 }
3495         }, {
3496                 .alg = "sha3-256",
3497                 .test = alg_test_hash,
3498                 .fips_allowed = 1,
3499                 .suite = {
3500 -                       .hash = {
3501 -                               .vecs = sha3_256_tv_template,
3502 -                               .count = SHA3_256_TEST_VECTORS
3503 -                       }
3504 +                       .hash = __VECS(sha3_256_tv_template)
3505                 }
3506         }, {
3507                 .alg = "sha3-384",
3508                 .test = alg_test_hash,
3509                 .fips_allowed = 1,
3510                 .suite = {
3511 -                       .hash = {
3512 -                               .vecs = sha3_384_tv_template,
3513 -                               .count = SHA3_384_TEST_VECTORS
3514 -                       }
3515 +                       .hash = __VECS(sha3_384_tv_template)
3516                 }
3517         }, {
3518                 .alg = "sha3-512",
3519                 .test = alg_test_hash,
3520                 .fips_allowed = 1,
3521                 .suite = {
3522 -                       .hash = {
3523 -                               .vecs = sha3_512_tv_template,
3524 -                               .count = SHA3_512_TEST_VECTORS
3525 -                       }
3526 +                       .hash = __VECS(sha3_512_tv_template)
3527                 }
3528         }, {
3529                 .alg = "sha384",
3530                 .test = alg_test_hash,
3531                 .fips_allowed = 1,
3532                 .suite = {
3533 -                       .hash = {
3534 -                               .vecs = sha384_tv_template,
3535 -                               .count = SHA384_TEST_VECTORS
3536 -                       }
3537 +                       .hash = __VECS(sha384_tv_template)
3538                 }
3539         }, {
3540                 .alg = "sha512",
3541                 .test = alg_test_hash,
3542                 .fips_allowed = 1,
3543                 .suite = {
3544 -                       .hash = {
3545 -                               .vecs = sha512_tv_template,
3546 -                               .count = SHA512_TEST_VECTORS
3547 -                       }
3548 +                       .hash = __VECS(sha512_tv_template)
3549                 }
3550         }, {
3551                 .alg = "tgr128",
3552                 .test = alg_test_hash,
3553                 .suite = {
3554 -                       .hash = {
3555 -                               .vecs = tgr128_tv_template,
3556 -                               .count = TGR128_TEST_VECTORS
3557 -                       }
3558 +                       .hash = __VECS(tgr128_tv_template)
3559                 }
3560         }, {
3561                 .alg = "tgr160",
3562                 .test = alg_test_hash,
3563                 .suite = {
3564 -                       .hash = {
3565 -                               .vecs = tgr160_tv_template,
3566 -                               .count = TGR160_TEST_VECTORS
3567 -                       }
3568 +                       .hash = __VECS(tgr160_tv_template)
3569                 }
3570         }, {
3571                 .alg = "tgr192",
3572                 .test = alg_test_hash,
3573                 .suite = {
3574 -                       .hash = {
3575 -                               .vecs = tgr192_tv_template,
3576 -                               .count = TGR192_TEST_VECTORS
3577 +                       .hash = __VECS(tgr192_tv_template)
3578 +               }
3579 +       }, {
3580 +               .alg = "tls10(hmac(sha1),cbc(aes))",
3581 +               .test = alg_test_tls,
3582 +               .suite = {
3583 +                       .tls = {
3584 +                               .enc = __VECS(tls_enc_tv_template),
3585 +                               .dec = __VECS(tls_dec_tv_template)
3586                         }
3587                 }
3588         }, {
3589                 .alg = "vmac(aes)",
3590                 .test = alg_test_hash,
3591                 .suite = {
3592 -                       .hash = {
3593 -                               .vecs = aes_vmac128_tv_template,
3594 -                               .count = VMAC_AES_TEST_VECTORS
3595 -                       }
3596 +                       .hash = __VECS(aes_vmac128_tv_template)
3597                 }
3598         }, {
3599                 .alg = "wp256",
3600                 .test = alg_test_hash,
3601                 .suite = {
3602 -                       .hash = {
3603 -                               .vecs = wp256_tv_template,
3604 -                               .count = WP256_TEST_VECTORS
3605 -                       }
3606 +                       .hash = __VECS(wp256_tv_template)
3607                 }
3608         }, {
3609                 .alg = "wp384",
3610                 .test = alg_test_hash,
3611                 .suite = {
3612 -                       .hash = {
3613 -                               .vecs = wp384_tv_template,
3614 -                               .count = WP384_TEST_VECTORS
3615 -                       }
3616 +                       .hash = __VECS(wp384_tv_template)
3617                 }
3618         }, {
3619                 .alg = "wp512",
3620                 .test = alg_test_hash,
3621                 .suite = {
3622 -                       .hash = {
3623 -                               .vecs = wp512_tv_template,
3624 -                               .count = WP512_TEST_VECTORS
3625 -                       }
3626 +                       .hash = __VECS(wp512_tv_template)
3627                 }
3628         }, {
3629                 .alg = "xcbc(aes)",
3630                 .test = alg_test_hash,
3631                 .suite = {
3632 -                       .hash = {
3633 -                               .vecs = aes_xcbc128_tv_template,
3634 -                               .count = XCBC_AES_TEST_VECTORS
3635 -                       }
3636 +                       .hash = __VECS(aes_xcbc128_tv_template)
3637                 }
3638         }, {
3639                 .alg = "xts(aes)",
3640 @@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3641                 .fips_allowed = 1,
3642                 .suite = {
3643                         .cipher = {
3644 -                               .enc = {
3645 -                                       .vecs = aes_xts_enc_tv_template,
3646 -                                       .count = AES_XTS_ENC_TEST_VECTORS
3647 -                               },
3648 -                               .dec = {
3649 -                                       .vecs = aes_xts_dec_tv_template,
3650 -                                       .count = AES_XTS_DEC_TEST_VECTORS
3651 -                               }
3652 +                               .enc = __VECS(aes_xts_enc_tv_template),
3653 +                               .dec = __VECS(aes_xts_dec_tv_template)
3654                         }
3655                 }
3656         }, {
3657 @@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3658                 .test = alg_test_skcipher,
3659                 .suite = {
3660                         .cipher = {
3661 -                               .enc = {
3662 -                                       .vecs = camellia_xts_enc_tv_template,
3663 -                                       .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3664 -                               },
3665 -                               .dec = {
3666 -                                       .vecs = camellia_xts_dec_tv_template,
3667 -                                       .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3668 -                               }
3669 +                               .enc = __VECS(camellia_xts_enc_tv_template),
3670 +                               .dec = __VECS(camellia_xts_dec_tv_template)
3671                         }
3672                 }
3673         }, {
3674 @@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3675                 .test = alg_test_skcipher,
3676                 .suite = {
3677                         .cipher = {
3678 -                               .enc = {
3679 -                                       .vecs = cast6_xts_enc_tv_template,
3680 -                                       .count = CAST6_XTS_ENC_TEST_VECTORS
3681 -                               },
3682 -                               .dec = {
3683 -                                       .vecs = cast6_xts_dec_tv_template,
3684 -                                       .count = CAST6_XTS_DEC_TEST_VECTORS
3685 -                               }
3686 +                               .enc = __VECS(cast6_xts_enc_tv_template),
3687 +                               .dec = __VECS(cast6_xts_dec_tv_template)
3688                         }
3689                 }
3690         }, {
3691 @@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3692                 .test = alg_test_skcipher,
3693                 .suite = {
3694                         .cipher = {
3695 -                               .enc = {
3696 -                                       .vecs = serpent_xts_enc_tv_template,
3697 -                                       .count = SERPENT_XTS_ENC_TEST_VECTORS
3698 -                               },
3699 -                               .dec = {
3700 -                                       .vecs = serpent_xts_dec_tv_template,
3701 -                                       .count = SERPENT_XTS_DEC_TEST_VECTORS
3702 -                               }
3703 +                               .enc = __VECS(serpent_xts_enc_tv_template),
3704 +                               .dec = __VECS(serpent_xts_dec_tv_template)
3705                         }
3706                 }
3707         }, {
3708 @@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_test_descs[] = {
3709                 .test = alg_test_skcipher,
3710                 .suite = {
3711                         .cipher = {
3712 -                               .enc = {
3713 -                                       .vecs = tf_xts_enc_tv_template,
3714 -                                       .count = TF_XTS_ENC_TEST_VECTORS
3715 -                               },
3716 -                               .dec = {
3717 -                                       .vecs = tf_xts_dec_tv_template,
3718 -                                       .count = TF_XTS_DEC_TEST_VECTORS
3719 -                               }
3720 +                               .enc = __VECS(tf_xts_enc_tv_template),
3721 +                               .dec = __VECS(tf_xts_dec_tv_template)
3722                         }
3723                 }
3724         }
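
The testmgr.c hunks above all make the same mechanical change: every open-coded { .vecs = ..., .count = ... } initializer in alg_test_descs[] is collapsed into the __VECS() helper, so the vector count is derived from the array itself instead of a hand-maintained *_TEST_VECTORS constant that can fall out of sync. The standalone sketch below illustrates the pattern; the __VECS definition is reconstructed from the pairs being removed (the real one is introduced earlier in this patch), and demo_tv_template plus the trimmed structs are hypothetical stand-ins, not kernel code.

/* Minimal sketch of the __VECS() pattern used throughout the hunks above.
 * The macro body is an assumption inferred from the removed initializers;
 * in the kernel, ARRAY_SIZE() comes from <linux/kernel.h>. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define __VECS(tv)    { .vecs = tv, .count = ARRAY_SIZE(tv) }

struct hash_testvec { const char *plaintext, *digest; };
struct hash_test_suite { const struct hash_testvec *vecs; unsigned int count; };

/* hypothetical stand-in for a template such as md5_tv_template */
static const struct hash_testvec demo_tv_template[] = {
	{ .plaintext = "",    .digest = "d41d8cd98f00b204e9800998ecf8427e" },
	{ .plaintext = "abc", .digest = "900150983cd24fb0d6963f7d28e17f72" },
};

int main(void)
{
	/* .count now tracks the array automatically; adding a vector to
	 * demo_tv_template[] needs no matching constant update */
	struct hash_test_suite suite = __VECS(demo_tv_template);

	printf("%u vectors\n", suite.count);
	return 0;
}

The same motivation drives the testmgr.h changes that follow: once counts are computed from the arrays, the template arrays and their struct fields can be const-qualified, and new vector types (such as tls_testvec for the tls10 suite registered above) slot into the same scheme.
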
3725 diff --git a/crypto/testmgr.h b/crypto/testmgr.h
3726 index 9033088c..ce9f4334 100644
3727 --- a/crypto/testmgr.h
3728 +++ b/crypto/testmgr.h
3729 @@ -34,9 +34,9 @@
3730  
3731  struct hash_testvec {
3732         /* only used with keyed hash algorithms */
3733 -       char *key;
3734 -       char *plaintext;
3735 -       char *digest;
3736 +       const char *key;
3737 +       const char *plaintext;
3738 +       const char *digest;
3739         unsigned char tap[MAX_TAP];
3740         unsigned short psize;
3741         unsigned char np;
3742 @@ -63,11 +63,11 @@ struct hash_testvec {
3743   */
3744  
3745  struct cipher_testvec {
3746 -       char *key;
3747 -       char *iv;
3748 -       char *iv_out;
3749 -       char *input;
3750 -       char *result;
3751 +       const char *key;
3752 +       const char *iv;
3753 +       const char *iv_out;
3754 +       const char *input;
3755 +       const char *result;
3756         unsigned short tap[MAX_TAP];
3757         int np;
3758         unsigned char also_non_np;
3759 @@ -80,11 +80,11 @@ struct cipher_testvec {
3760  };
3761  
3762  struct aead_testvec {
3763 -       char *key;
3764 -       char *iv;
3765 -       char *input;
3766 -       char *assoc;
3767 -       char *result;
3768 +       const char *key;
3769 +       const char *iv;
3770 +       const char *input;
3771 +       const char *assoc;
3772 +       const char *result;
3773         unsigned char tap[MAX_TAP];
3774         unsigned char atap[MAX_TAP];
3775         int np;
3776 @@ -99,10 +99,10 @@ struct aead_testvec {
3777  };
3778  
3779  struct cprng_testvec {
3780 -       char *key;
3781 -       char *dt;
3782 -       char *v;
3783 -       char *result;
3784 +       const char *key;
3785 +       const char *dt;
3786 +       const char *v;
3787 +       const char *result;
3788         unsigned char klen;
3789         unsigned short dtlen;
3790         unsigned short vlen;
3791 @@ -111,24 +111,38 @@ struct cprng_testvec {
3792  };
3793  
3794  struct drbg_testvec {
3795 -       unsigned char *entropy;
3796 +       const unsigned char *entropy;
3797         size_t entropylen;
3798 -       unsigned char *entpra;
3799 -       unsigned char *entprb;
3800 +       const unsigned char *entpra;
3801 +       const unsigned char *entprb;
3802         size_t entprlen;
3803 -       unsigned char *addtla;
3804 -       unsigned char *addtlb;
3805 +       const unsigned char *addtla;
3806 +       const unsigned char *addtlb;
3807         size_t addtllen;
3808 -       unsigned char *pers;
3809 +       const unsigned char *pers;
3810         size_t perslen;
3811 -       unsigned char *expected;
3812 +       const unsigned char *expected;
3813         size_t expectedlen;
3814  };
3815  
3816 +struct tls_testvec {
3817 +       char *key;      /* wrapped keys for encryption and authentication */
3818 +       char *iv;       /* initialization vector */
3819 +       char *input;    /* input data */
3820 +       char *assoc;    /* associated data: seq num, type, version, input len */
3821 +       char *result;   /* result data */
3822 +       unsigned char fail;     /* the test failure is expected */
3823 +       unsigned char novrfy;   /* dec verification failure expected */
3824 +       unsigned char klen;     /* key length */
3825 +       unsigned short ilen;    /* input data length */
3826 +       unsigned short alen;    /* associated data length */
3827 +       unsigned short rlen;    /* result length */
3828 +};
3829 +
3830  struct akcipher_testvec {
3831 -       unsigned char *key;
3832 -       unsigned char *m;
3833 -       unsigned char *c;
3834 +       const unsigned char *key;
3835 +       const unsigned char *m;
3836 +       const unsigned char *c;
3837         unsigned int key_len;
3838         unsigned int m_size;
3839         unsigned int c_size;
3840 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3841  };
3842  
3843  struct kpp_testvec {
3844 -       unsigned char *secret;
3845 -       unsigned char *b_public;
3846 -       unsigned char *expected_a_public;
3847 -       unsigned char *expected_ss;
3848 +       const unsigned char *secret;
3849 +       const unsigned char *b_public;
3850 +       const unsigned char *expected_a_public;
3851 +       const unsigned char *expected_ss;
3852         unsigned short secret_size;
3853         unsigned short b_public_size;
3854         unsigned short expected_a_public_size;
3855         unsigned short expected_ss_size;
3856  };
3857  
3858 -static char zeroed_string[48];
3859 +static const char zeroed_string[48];
3860  
3861  /*
3862 - * RSA test vectors. Borrowed from openSSL.
3863 + * TLS1.0 synthetic test vectors
3864   */
3865 -#ifdef CONFIG_CRYPTO_FIPS
3866 -#define RSA_TEST_VECTORS       2
3867 +static struct tls_testvec tls_enc_tv_template[] = {
3868 +       {
3869 +#ifdef __LITTLE_ENDIAN
3870 +               .key    = "\x08\x00"            /* rta length */
3871 +                       "\x01\x00"              /* rta type */
3872 +#else
3873 +               .key    = "\x00\x08"            /* rta length */
3874 +                       "\x00\x01"              /* rta type */
3875 +#endif
3876 +                       "\x00\x00\x00\x10"      /* enc key length */
3877 +                       "authenticationkey20benckeyis16_bytes",
3878 +               .klen   = 8 + 20 + 16,
3879 +               .iv     = "iv0123456789abcd",
3880 +               .input  = "Single block msg",
3881 +               .ilen   = 16,
3882 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3883 +                       "\x00\x03\x01\x00\x10",
3884 +               .alen   = 13,
3885 +               .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3886 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3887 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3888 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3889 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3890 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3891 +               .rlen   = 16 + 20 + 12,
3892 +       }, {
3893 +#ifdef __LITTLE_ENDIAN
3894 +               .key    = "\x08\x00"            /* rta length */
3895 +                       "\x01\x00"              /* rta type */
3896  #else
3897 -#define RSA_TEST_VECTORS       5
3898 +               .key    = "\x00\x08"            /* rta length */
3899 +                       "\x00\x01"              /* rta type */
3900  #endif
3901 -static struct akcipher_testvec rsa_tv_template[] = {
3902 +                       "\x00\x00\x00\x10"      /* enc key length */
3903 +                       "authenticationkey20benckeyis16_bytes",
3904 +               .klen   = 8 + 20 + 16,
3905 +               .iv     = "iv0123456789abcd",
3906 +               .input  = "",
3907 +               .ilen   = 0,
3908 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3909 +                       "\x00\x03\x01\x00\x00",
3910 +               .alen   = 13,
3911 +               .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3912 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3913 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3914 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3915 +               .rlen   = 20 + 12,
3916 +       }, {
3917 +#ifdef __LITTLE_ENDIAN
3918 +               .key    = "\x08\x00"            /* rta length */
3919 +                       "\x01\x00"              /* rta type */
3920 +#else
3921 +               .key    = "\x00\x08"            /* rta length */
3922 +                       "\x00\x01"              /* rta type */
3923 +#endif
3924 +                       "\x00\x00\x00\x10"      /* enc key length */
3925 +                       "authenticationkey20benckeyis16_bytes",
3926 +               .klen   = 8 + 20 + 16,
3927 +               .iv     = "iv0123456789abcd",
3928 +               .input  = "285 bytes plaintext285 bytes plaintext285 bytes"
3929 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3930 +                       " bytes plaintext285 bytes plaintext285 bytes"
3931 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3932 +                       " bytes plaintext285 bytes plaintext285 bytes"
3933 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3934 +                       " bytes plaintext285 bytes plaintext",
3935 +               .ilen   = 285,
3936 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3937 +                       "\x00\x03\x01\x01\x1d",
3938 +               .alen   = 13,
3939 +               .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3940 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3941 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3942 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3943 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3944 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3945 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3946 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3947 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3948 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3949 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3950 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3951 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3952 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3953 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3954 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3955 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3956 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3957 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3958 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3959 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3960 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3961 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3962 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3963 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3964 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3965 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3966 +               .rlen   = 285 + 20 + 15,
3967 +       }
3968 +};
3969 +
3970 +static struct tls_testvec tls_dec_tv_template[] = {
3971 +       {
3972 +#ifdef __LITTLE_ENDIAN
3973 +               .key    = "\x08\x00"            /* rta length */
3974 +                       "\x01\x00"              /* rta type */
3975 +#else
3976 +               .key    = "\x00\x08"            /* rta length */
3977 +                       "\x00\x01"              /* rta type */
3978 +#endif
3979 +                       "\x00\x00\x00\x10"      /* enc key length */
3980 +                       "authenticationkey20benckeyis16_bytes",
3981 +               .klen   = 8 + 20 + 16,
3982 +               .iv     = "iv0123456789abcd",
3983 +               .input  = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3984 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3985 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3986 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3987 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3988 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3989 +               .ilen   = 16 + 20 + 12,
3990 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3991 +                       "\x00\x03\x01\x00\x30",
3992 +               .alen   = 13,
3993 +               .result = "Single block msg",
3994 +               .rlen   = 16,
3995 +       }, {
3996 +#ifdef __LITTLE_ENDIAN
3997 +               .key    = "\x08\x00"            /* rta length */
3998 +                       "\x01\x00"              /* rta type */
3999 +#else
4000 +               .key    = "\x00\x08"            /* rta length */
4001 +                       "\x00\x01"              /* rta type */
4002 +#endif
4003 +                       "\x00\x00\x00\x10"      /* enc key length */
4004 +                       "authenticationkey20benckeyis16_bytes",
4005 +               .klen   = 8 + 20 + 16,
4006 +               .iv     = "iv0123456789abcd",
4007 +               .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4008 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4009 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4010 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4011 +               .ilen   = 20 + 12,
4012 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
4013 +                       "\x00\x03\x01\x00\x20",
4014 +               .alen   = 13,
4015 +               .result = "",
4016 +               .rlen   = 0,
4017 +       }, {
4018 +#ifdef __LITTLE_ENDIAN
4019 +               .key    = "\x08\x00"            /* rta length */
4020 +                       "\x01\x00"              /* rta type */
4021 +#else
4022 +               .key    = "\x00\x08"            /* rta length */
4023 +                       "\x00\x01"              /* rta type */
4024 +#endif
4025 +                       "\x00\x00\x00\x10"      /* enc key length */
4026 +                       "authenticationkey20benckeyis16_bytes",
4027 +               .klen   = 8 + 20 + 16,
4028 +               .iv     = "iv0123456789abcd",
4029 +               .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4030 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4031 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4032 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4033 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4034 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4035 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4036 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4037 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4038 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4039 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4040 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4041 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4042 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4043 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4044 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4045 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4046 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4047 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4048 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4049 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4050 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4051 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4052 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4053 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4054 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4055 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4056 +
4057 +               .ilen   = 285 + 20 + 15,
4058 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
4059 +                       "\x00\x03\x01\x01\x40",
4060 +               .alen   = 13,
4061 +               .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4062 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4063 +                       " bytes plaintext285 bytes plaintext285 bytes"
4064 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4065 +                       " bytes plaintext285 bytes plaintext285 bytes"
4066 +                       " plaintext285 bytes plaintext285 bytes plaintext",
4067 +               .rlen   = 285,
4068 +       }
4069 +};
4070 +
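
The synthetic vectors above encode their inputs the way the tls algorithm added by this patch expects them at run time: .assoc is the 13-byte TLS pseudo-header (8-byte sequence number, 1-byte record type, 2-byte version 0x0301, 2-byte length), and .key is an authenc-style blob -- an 8-byte rtattr header in host byte order (hence the __LITTLE_ENDIAN conditionals), a big-endian 32-bit encryption-key length, then the 20-byte HMAC key and the 16-byte AES key back to back, matching .klen = 8 + 20 + 16. The .rlen values likewise decompose as payload + 20-byte HMAC-SHA1 tag + CBC padding to a 16-byte boundary (12 or 15 pad bytes here, including the pad-length byte). A hedged userspace sketch of assembling such a key blob; the layout is taken from the vectors themselves, while the function and names are illustrative, not kernel API:

	/* Illustrative only: packs a key blob with the same layout as the
	 * tls_*_tv_template .key fields above. */
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>	/* htonl() */

	static size_t tls_build_key(uint8_t *out,
				    const uint8_t *authkey, size_t authlen,
				    const uint8_t *enckey, size_t enclen)
	{
		uint16_t rta_len = 8, rta_type = 1; /* host-endian, cf. #ifdefs */
		uint32_t be_enclen = htonl((uint32_t)enclen);

		memcpy(out, &rta_len, 2);
		memcpy(out + 2, &rta_type, 2);
		memcpy(out + 4, &be_enclen, 4);		   /* "\x00\x00\x00\x10" */
		memcpy(out + 8, authkey, authlen);	   /* HMAC-SHA1 key, 20 B */
		memcpy(out + 8 + authlen, enckey, enclen); /* AES-CBC key, 16 B */
		return 8 + authlen + enclen;		   /* == .klen above */
	}
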
4071 +/*
4072 + * RSA test vectors. Borrowed from OpenSSL.
4073 + */
4074 +static const struct akcipher_testvec rsa_tv_template[] = {
4075         {
4076  #ifndef CONFIG_CRYPTO_FIPS
4077         .key =
4078 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_template[] = {
4079         .m_size = 8,
4080         .c_size = 256,
4081         .public_key_vec = true,
4082 +#ifndef CONFIG_CRYPTO_FIPS
4083         }, {
4084         .key =
4085         "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4086 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_template[] = {
4087         .key_len = 2349,
4088         .m_size = 8,
4089         .c_size = 512,
4090 +#endif
4091         }
4092  };
4093  
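
With the counts gone, FIPS-mode trimming also changes shape: rather than defining RSA_TEST_VECTORS as 2 or 5, the patch brackets the non-approved entries in #ifndef CONFIG_CRYPTO_FIPS (the two lines added above), and ARRAY_SIZE() inside __VECS() picks up whichever subset was compiled in. A trivial sketch of the pattern, with a placeholder array:

	/* Sketch: preprocessor bracketing plus ARRAY_SIZE needs no count macro */
	static const int demo_subset[] = {
		1, 2,
	#ifndef CONFIG_CRYPTO_FIPS
		3, 4, 5,
	#endif
	};
	/* sizeof(demo_subset) / sizeof(demo_subset[0]) is 2 with
	 * CONFIG_CRYPTO_FIPS defined and 5 without */
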
4094 -#define DH_TEST_VECTORS 2
4095 -
4096 -struct kpp_testvec dh_tv_template[] = {
4097 +static const struct kpp_testvec dh_tv_template[] = {
4098         {
4099         .secret =
4100  #ifdef __LITTLE_ENDIAN
4101 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4102         }
4103  };
4104  
4105 -#ifdef CONFIG_CRYPTO_FIPS
4106 -#define ECDH_TEST_VECTORS 1
4107 -#else
4108 -#define ECDH_TEST_VECTORS 2
4109 -#endif
4110 -struct kpp_testvec ecdh_tv_template[] = {
4111 +static const struct kpp_testvec ecdh_tv_template[] = {
4112         {
4113  #ifndef CONFIG_CRYPTO_FIPS
4114         .secret =
4115 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] = {
4116  /*
4117   * MD4 test vectors from RFC1320
4118   */
4119 -#define MD4_TEST_VECTORS       7
4120 -
4121 -static struct hash_testvec md4_tv_template [] = {
4122 +static const struct hash_testvec md4_tv_template[] = {
4123         {
4124                 .plaintext = "",
4125                 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4126 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_template [] = {
4127         },
4128  };
4129  
4130 -#define SHA3_224_TEST_VECTORS  3
4131 -static struct hash_testvec sha3_224_tv_template[] = {
4132 +static const struct hash_testvec sha3_224_tv_template[] = {
4133         {
4134                 .plaintext = "",
4135                 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4136 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_template[] = {
4137         },
4138  };
4139  
4140 -#define SHA3_256_TEST_VECTORS  3
4141 -static struct hash_testvec sha3_256_tv_template[] = {
4142 +static const struct hash_testvec sha3_256_tv_template[] = {
4143         {
4144                 .plaintext = "",
4145                 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4146 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_template[] = {
4147  };
4148  
4149  
4150 -#define SHA3_384_TEST_VECTORS  3
4151 -static struct hash_testvec sha3_384_tv_template[] = {
4152 +static const struct hash_testvec sha3_384_tv_template[] = {
4153         {
4154                 .plaintext = "",
4155                 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4156 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_template[] = {
4157  };
4158  
4159  
4160 -#define SHA3_512_TEST_VECTORS  3
4161 -static struct hash_testvec sha3_512_tv_template[] = {
4162 +static const struct hash_testvec sha3_512_tv_template[] = {
4163         {
4164                 .plaintext = "",
4165                 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4166 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_template[] = {
4167  /*
4168   * MD5 test vectors from RFC1321
4169   */
4170 -#define MD5_TEST_VECTORS       7
4171 -
4172 -static struct hash_testvec md5_tv_template[] = {
4173 +static const struct hash_testvec md5_tv_template[] = {
4174         {
4175                 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4176                           "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4177 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_template[] = {
4178  /*
4179   * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4180   */
4181 -#define RMD128_TEST_VECTORS     10
4182 -
4183 -static struct hash_testvec rmd128_tv_template[] = {
4184 +static const struct hash_testvec rmd128_tv_template[] = {
4185         {
4186                 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4187                           "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4188 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_template[] = {
4189  /*
4190   * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4191   */
4192 -#define RMD160_TEST_VECTORS     10
4193 -
4194 -static struct hash_testvec rmd160_tv_template[] = {
4195 +static const struct hash_testvec rmd160_tv_template[] = {
4196         {
4197                 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4198                           "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4199 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_template[] = {
4200  /*
4201   * RIPEMD-256 test vectors
4202   */
4203 -#define RMD256_TEST_VECTORS     8
4204 -
4205 -static struct hash_testvec rmd256_tv_template[] = {
4206 +static const struct hash_testvec rmd256_tv_template[] = {
4207         {
4208                 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4209                           "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4210 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_template[] = {
4211  /*
4212   * RIPEMD-320 test vectors
4213   */
4214 -#define RMD320_TEST_VECTORS     8
4215 -
4216 -static struct hash_testvec rmd320_tv_template[] = {
4217 +static const struct hash_testvec rmd320_tv_template[] = {
4218         {
4219                 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4220                           "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4221 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_template[] = {
4222         }
4223  };
4224  
4225 -#define CRCT10DIF_TEST_VECTORS 3
4226 -static struct hash_testvec crct10dif_tv_template[] = {
4227 +static const struct hash_testvec crct10dif_tv_template[] = {
4228         {
4229 -               .plaintext = "abc",
4230 -               .psize  = 3,
4231 -#ifdef __LITTLE_ENDIAN
4232 -               .digest = "\x3b\x44",
4233 -#else
4234 -               .digest = "\x44\x3b",
4235 -#endif
4236 -       }, {
4237 -               .plaintext = "1234567890123456789012345678901234567890"
4238 -                            "123456789012345678901234567890123456789",
4239 -               .psize  = 79,
4240 -#ifdef __LITTLE_ENDIAN
4241 -               .digest = "\x70\x4b",
4242 -#else
4243 -               .digest = "\x4b\x70",
4244 -#endif
4245 -       }, {
4246 -               .plaintext =
4247 -               "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4248 -               .psize  = 56,
4249 -#ifdef __LITTLE_ENDIAN
4250 -               .digest = "\xe3\x9c",
4251 -#else
4252 -               .digest = "\x9c\xe3",
4253 -#endif
4254 -               .np     = 2,
4255 -               .tap    = { 28, 28 }
4256 +               .plaintext      = "abc",
4257 +               .psize          = 3,
4258 +               .digest         = (u8 *)(u16 []){ 0x443b },
4259 +       }, {
4260 +               .plaintext      = "1234567890123456789012345678901234567890"
4261 +                                 "123456789012345678901234567890123456789",
4262 +               .psize          = 79,
4263 +               .digest         = (u8 *)(u16 []){ 0x4b70 },
4264 +               .np             = 2,
4265 +               .tap            = { 63, 16 },
4266 +       }, {
4267 +               .plaintext      = "abcdddddddddddddddddddddddddddddddddddddddd"
4268 +                                 "ddddddddddddd",
4269 +               .psize          = 56,
4270 +               .digest         = (u8 *)(u16 []){ 0x9ce3 },
4271 +               .np             = 8,
4272 +               .tap            = { 1, 2, 28, 7, 6, 5, 4, 3 },
4273 +       }, {
4274 +               .plaintext      = "1234567890123456789012345678901234567890"
4275 +                                 "1234567890123456789012345678901234567890"
4276 +                                 "1234567890123456789012345678901234567890"
4277 +                                 "1234567890123456789012345678901234567890"
4278 +                                 "1234567890123456789012345678901234567890"
4279 +                                 "1234567890123456789012345678901234567890"
4280 +                                 "1234567890123456789012345678901234567890"
4281 +                                 "123456789012345678901234567890123456789",
4282 +               .psize          = 319,
4283 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4284 +       }, {
4285 +               .plaintext      = "1234567890123456789012345678901234567890"
4286 +                                 "1234567890123456789012345678901234567890"
4287 +                                 "1234567890123456789012345678901234567890"
4288 +                                 "1234567890123456789012345678901234567890"
4289 +                                 "1234567890123456789012345678901234567890"
4290 +                                 "1234567890123456789012345678901234567890"
4291 +                                 "1234567890123456789012345678901234567890"
4292 +                                 "123456789012345678901234567890123456789",
4293 +               .psize          = 319,
4294 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4295 +               .np             = 4,
4296 +               .tap            = { 1, 255, 57, 6 },
4297         }
4298  };
4299  
4300 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_template[] = {
4301   * SHA1 test vectors from FIPS PUB 180-1
4302   * Long vector from CAVS 5.0
4303   */
4304 -#define SHA1_TEST_VECTORS      6
4305 -
4306 -static struct hash_testvec sha1_tv_template[] = {
4307 +static const struct hash_testvec sha1_tv_template[] = {
4308         {
4309                 .plaintext = "",
4310                 .psize  = 0,
4311 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_template[] = {
4312  /*
4313   * SHA224 test vectors from FIPS PUB 180-2
4314   */
4315 -#define SHA224_TEST_VECTORS     5
4316 -
4317 -static struct hash_testvec sha224_tv_template[] = {
4318 +static const struct hash_testvec sha224_tv_template[] = {
4319         {
4320                 .plaintext = "",
4321                 .psize  = 0,
4322 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_template[] = {
4323  /*
4324   * SHA256 test vectors from NIST
4325   */
4326 -#define SHA256_TEST_VECTORS    5
4327 -
4328 -static struct hash_testvec sha256_tv_template[] = {
4329 +static const struct hash_testvec sha256_tv_template[] = {
4330         {
4331                 .plaintext = "",
4332                 .psize  = 0,
4333 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_template[] = {
4334  /*
4335   * SHA384 test vectors from NIST and kerneli
4336   */
4337 -#define SHA384_TEST_VECTORS    6
4338 -
4339 -static struct hash_testvec sha384_tv_template[] = {
4340 +static const struct hash_testvec sha384_tv_template[] = {
4341         {
4342                 .plaintext = "",
4343                 .psize  = 0,
4344 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_template[] = {
4345  /*
4346   * SHA512 test vectors from NIST and kerneli
4347   */
4348 -#define SHA512_TEST_VECTORS    6
4349 -
4350 -static struct hash_testvec sha512_tv_template[] = {
4351 +static const struct hash_testvec sha512_tv_template[] = {
4352         {
4353                 .plaintext = "",
4354                 .psize  = 0,
4355 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_template[] = {
4356   * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4357   * submission
4358   */
4359 -#define WP512_TEST_VECTORS     8
4360 -
4361 -static struct hash_testvec wp512_tv_template[] = {
4362 +static const struct hash_testvec wp512_tv_template[] = {
4363         {
4364                 .plaintext = "",
4365                 .psize  = 0,
4366 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_template[] = {
4367         },
4368  };
4369  
4370 -#define WP384_TEST_VECTORS     8
4371 -
4372 -static struct hash_testvec wp384_tv_template[] = {
4373 +static const struct hash_testvec wp384_tv_template[] = {
4374         {
4375                 .plaintext = "",
4376                 .psize  = 0,
4377 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_template[] = {
4378         },
4379  };
4380  
4381 -#define WP256_TEST_VECTORS     8
4382 -
4383 -static struct hash_testvec wp256_tv_template[] = {
4384 +static const struct hash_testvec wp256_tv_template[] = {
4385         {
4386                 .plaintext = "",
4387                 .psize  = 0,
4388 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_template[] = {
4389  /*
4390   * TIGER test vectors from Tiger website
4391   */
4392 -#define TGR192_TEST_VECTORS    6
4393 -
4394 -static struct hash_testvec tgr192_tv_template[] = {
4395 +static const struct hash_testvec tgr192_tv_template[] = {
4396         {
4397                 .plaintext = "",
4398                 .psize  = 0,
4399 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_template[] = {
4400         },
4401  };
4402  
4403 -#define TGR160_TEST_VECTORS    6
4404 -
4405 -static struct hash_testvec tgr160_tv_template[] = {
4406 +static const struct hash_testvec tgr160_tv_template[] = {
4407         {
4408                 .plaintext = "",
4409                 .psize  = 0,
4410 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_template[] = {
4411         },
4412  };
4413  
4414 -#define TGR128_TEST_VECTORS    6
4415 -
4416 -static struct hash_testvec tgr128_tv_template[] = {
4417 +static const struct hash_testvec tgr128_tv_template[] = {
4418         {
4419                 .plaintext = "",
4420                 .psize  = 0,
4421 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_template[] = {
4422         },
4423  };
4424  
4425 -#define GHASH_TEST_VECTORS 6
4426 -
4427 -static struct hash_testvec ghash_tv_template[] =
4428 +static const struct hash_testvec ghash_tv_template[] =
4429  {
4430         {
4431                 .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4432 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_template[] =
4433   * HMAC-MD5 test vectors from RFC2202
4434   * (These need to be fixed to not use strlen).
4435   */
4436 -#define HMAC_MD5_TEST_VECTORS  7
4437 -
4438 -static struct hash_testvec hmac_md5_tv_template[] =
4439 +static const struct hash_testvec hmac_md5_tv_template[] =
4440  {
4441         {
4442                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4443 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_template[] =
4444  /*
4445   * HMAC-RIPEMD128 test vectors from RFC2286
4446   */
4447 -#define HMAC_RMD128_TEST_VECTORS       7
4448 -
4449 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4450 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4451         {
4452                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4453                 .ksize  = 16,
4454 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_tv_template[] = {
4455  /*
4456   * HMAC-RIPEMD160 test vectors from RFC2286
4457   */
4458 -#define HMAC_RMD160_TEST_VECTORS       7
4459 -
4460 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4461 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4462         {
4463                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4464                 .ksize  = 20,
4465 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_tv_template[] = {
4466  /*
4467   * HMAC-SHA1 test vectors from RFC2202
4468   */
4469 -#define HMAC_SHA1_TEST_VECTORS 7
4470 -
4471 -static struct hash_testvec hmac_sha1_tv_template[] = {
4472 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4473         {
4474                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4475                 .ksize  = 20,
4476 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
4477  /*
4478   * SHA224 HMAC test vectors from RFC4231
4479   */
4480 -#define HMAC_SHA224_TEST_VECTORS    4
4481 -
4482 -static struct hash_testvec hmac_sha224_tv_template[] = {
4483 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4484         {
4485                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4486                         "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4487 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_tv_template[] = {
4488   * HMAC-SHA256 test vectors from
4489   * draft-ietf-ipsec-ciph-sha-256-01.txt
4490   */
4491 -#define HMAC_SHA256_TEST_VECTORS       10
4492 -
4493 -static struct hash_testvec hmac_sha256_tv_template[] = {
4494 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4495         {
4496                 .key    = "\x01\x02\x03\x04\x05\x06\x07\x08"
4497                           "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4498 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_tv_template[] = {
4499         },
4500  };
4501  
4502 -#define CMAC_AES_TEST_VECTORS 6
4503 -
4504 -static struct hash_testvec aes_cmac128_tv_template[] = {
4505 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4506         { /* From NIST Special Publication 800-38B, AES-128 */
4507                 .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4508                                   "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4509 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_tv_template[] = {
4510         }
4511  };
4512  
4513 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4514 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4515 +       {
4516 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4517 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4518 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4519 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4520 +               .digest         = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4521 +                                 "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4522 +               .psize          = 16,
4523 +               .ksize          = 16,
4524 +       }, {
4525 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4526 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4527 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4528 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4529 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4530 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4531 +                                 "\x30",
4532 +               .digest         = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4533 +                                 "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4534 +               .psize          = 33,
4535 +               .ksize          = 16,
4536 +               .np             = 2,
4537 +               .tap            = { 7, 26 },
4538 +       }, {
4539 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4540 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4541 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4542 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4543 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4544 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4545 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4546 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4547 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4548 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37",
4549 +               .digest         = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4550 +                                 "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4551 +               .psize          = 63,
4552 +               .ksize          = 16,
4553 +       }, {
4554 +               .key            = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4555 +                                 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4556 +                                 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4557 +                                 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4558 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4559 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4560 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4561 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4562 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4563 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4564 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4565 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4566 +                                 "\x1c",
4567 +               .digest         = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4568 +                                 "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4569 +               .psize          = 65,
4570 +               .ksize          = 32,
4571 +       }
4572 +};
4573  
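
The aes_cbcmac vectors are new in this hunk; cbcmac(aes) is the plain CBC-MAC used as CCM's authentication primitive: CBC-encrypt under an all-zero IV, zero-pad any trailing partial block, and keep only the final ciphertext block (the 33- and 65-byte vectors above exercise exactly that padding path). A hedged reference sketch for cross-checking the vectors, assuming OpenSSL's EVP API purely as an AES-CBC primitive -- this is not how the kernel or the CAAM hardware computes it:

	/* Illustrative CBC-MAC over AES.  Assumes a non-empty message,
	 * as all four vectors above are. */
	#include <string.h>
	#include <openssl/evp.h>

	static int aes_cbcmac(const unsigned char *key, int klen,
			      const unsigned char *msg, size_t len,
			      unsigned char mac[16])
	{
		static const unsigned char zero_iv[16];
		const EVP_CIPHER *c = klen == 16 ? EVP_aes_128_cbc() :
				      klen == 32 ? EVP_aes_256_cbc() : NULL;
		EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
		unsigned char out[16];
		int outl, ok = 0;

		if (c && ctx &&
		    EVP_EncryptInit_ex(ctx, c, NULL, key, zero_iv) == 1) {
			EVP_CIPHER_CTX_set_padding(ctx, 0);
			ok = 1;
		}
		while (ok && len) {
			unsigned char block[16] = { 0 };	/* zero padding */
			size_t n = len < 16 ? len : 16;

			memcpy(block, msg, n);
			ok = EVP_EncryptUpdate(ctx, out, &outl, block, 16) == 1;
			memcpy(mac, out, 16);	/* MAC = last ciphertext block */
			msg += n;
			len -= n;
		}
		EVP_CIPHER_CTX_free(ctx);
		return ok ? 0 : -1;
	}
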
4574 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4575 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4576  /*
4577   * From NIST Special Publication 800-38B, Three Key TDEA
4578   * Corrected test vectors from:
4579 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4580         }
4581  };
4582  
4583 -#define XCBC_AES_TEST_VECTORS 6
4584 -
4585 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4586 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4587         {
4588                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4589                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4590 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
4591         }
4592  };
4593  
4594 -#define VMAC_AES_TEST_VECTORS  11
4595 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4596 -                               '\x02', '\x03', '\x02', '\x02',
4597 -                               '\x02', '\x04', '\x01', '\x07',
4598 -                               '\x04', '\x01', '\x04', '\x03',};
4599 -static char vmac_string2[128] = {'a', 'b', 'c',};
4600 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4601 -                               'a', 'b', 'c', 'a', 'b', 'c',
4602 -                               'a', 'b', 'c', 'a', 'b', 'c',
4603 -                               'a', 'b', 'c', 'a', 'b', 'c',
4604 -                               'a', 'b', 'c', 'a', 'b', 'c',
4605 -                               'a', 'b', 'c', 'a', 'b', 'c',
4606 -                               'a', 'b', 'c', 'a', 'b', 'c',
4607 -                               'a', 'b', 'c', 'a', 'b', 'c',
4608 -                               };
4609 -
4610 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4611 -                               'i', 'j', 'l', 'm',
4612 -                               'o', 'p', 'r', 's',
4613 -                               't', 'u', 'w', 'x', 'z'};
4614 -
4615 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4616 -                                'o', 'l', 'k', ']', '%',
4617 -                                '9', '2', '7', '!', 'A'};
4618 -
4619 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4620 -                                'i', '!', '#', 'w', '0',
4621 -                                'z', '/', '4', 'A', 'n'};
4622 -
4623 -static struct hash_testvec aes_vmac128_tv_template[] = {
4624 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4625 +                                      '\x02', '\x03', '\x02', '\x02',
4626 +                                      '\x02', '\x04', '\x01', '\x07',
4627 +                                      '\x04', '\x01', '\x04', '\x03',};
4628 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4629 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4630 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4631 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4632 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4633 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4634 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4635 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4636 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4637 +                                     };
4638 +
4639 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4640 +                                     'i', 'j', 'l', 'm',
4641 +                                     'o', 'p', 'r', 's',
4642 +                                     't', 'u', 'w', 'x', 'z'};
4643 +
4644 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4645 +                                      'o', 'l', 'k', ']', '%',
4646 +                                      '9', '2', '7', '!', 'A'};
4647 +
4648 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4649 +                                      'i', '!', '#', 'w', '0',
4650 +                                      'z', '/', '4', 'A', 'n'};
4651 +
4652 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4653         {
4654                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4655                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4656 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_tv_template[] = {
4657   * SHA384 HMAC test vectors from RFC4231
4658   */
4659  
4660 -#define HMAC_SHA384_TEST_VECTORS       4
4661 -
4662 -static struct hash_testvec hmac_sha384_tv_template[] = {
4663 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4664         {
4665                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4666                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4667 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_tv_template[] = {
4668   * SHA512 HMAC test vectors from RFC4231
4669   */
4670  
4671 -#define HMAC_SHA512_TEST_VECTORS       4
4672 -
4673 -static struct hash_testvec hmac_sha512_tv_template[] = {
4674 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4675         {
4676                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4677                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4678 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
4679         },
4680  };
4681  
4682 -#define HMAC_SHA3_224_TEST_VECTORS     4
4683 -
4684 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4685 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4686         {
4687                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4688                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4689 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224_tv_template[] = {
4690         },
4691  };
4692  
4693 -#define HMAC_SHA3_256_TEST_VECTORS     4
4694 -
4695 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4696 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4697         {
4698                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4699                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4700 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256_tv_template[] = {
4701         },
4702  };
4703  
4704 -#define HMAC_SHA3_384_TEST_VECTORS     4
4705 -
4706 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4707 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4708         {
4709                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4710                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4711 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384_tv_template[] = {
4712         },
4713  };
4714  
4715 -#define HMAC_SHA3_512_TEST_VECTORS     4
4716 -
4717 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4718 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4719         {
4720                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4721                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4722 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512_tv_template[] = {
4723   * Poly1305 test vectors from RFC7539 A.3.
4724   */
4725  
4726 -#define POLY1305_TEST_VECTORS  11
4727 -
4728 -static struct hash_testvec poly1305_tv_template[] = {
4729 +static const struct hash_testvec poly1305_tv_template[] = {
4730         { /* Test Vector #1 */
4731                 .plaintext      = "\x00\x00\x00\x00\x00\x00\x00\x00"
4732                                   "\x00\x00\x00\x00\x00\x00\x00\x00"
4733 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_template[] = {
4734  /*
4735   * DES test vectors.
4736   */
4737 -#define DES_ENC_TEST_VECTORS           11
4738 -#define DES_DEC_TEST_VECTORS           5
4739 -#define DES_CBC_ENC_TEST_VECTORS       6
4740 -#define DES_CBC_DEC_TEST_VECTORS       5
4741 -#define DES_CTR_ENC_TEST_VECTORS       2
4742 -#define DES_CTR_DEC_TEST_VECTORS       2
4743 -#define DES3_EDE_ENC_TEST_VECTORS      4
4744 -#define DES3_EDE_DEC_TEST_VECTORS      4
4745 -#define DES3_EDE_CBC_ENC_TEST_VECTORS  2
4746 -#define DES3_EDE_CBC_DEC_TEST_VECTORS  2
4747 -#define DES3_EDE_CTR_ENC_TEST_VECTORS  2
4748 -#define DES3_EDE_CTR_DEC_TEST_VECTORS  2
4749 -
4750 -static struct cipher_testvec des_enc_tv_template[] = {
4751 +static const struct cipher_testvec des_enc_tv_template[] = {
4752         { /* From Applied Cryptography */
4753                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4754                 .klen   = 8,
4755 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_template[] = {
4756         },
4757  };
4758  
4759 -static struct cipher_testvec des_dec_tv_template[] = {
4760 +static const struct cipher_testvec des_dec_tv_template[] = {
4761         { /* From Applied Cryptography */
4762                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4763                 .klen   = 8,
4764 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_template[] = {
4765         },
4766  };
4767  
4768 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4769 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4770         { /* From OpenSSL */
4771                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4772                 .klen   = 8,
4773 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
4774         },
4775  };
4776  
4777 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4778 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4779         { /* FIPS Pub 81 */
4780                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4781                 .klen   = 8,
4782 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
4783         },
4784  };
4785  
4786 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4787 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4788         { /* Generated with Crypto++ */
4789                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4790                 .klen   = 8,
4791 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
4792         },
4793  };
4794  
4795 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4796 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4797         { /* Generated with Crypto++ */
4798                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4799                 .klen   = 8,
4800 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
4801         },
4802  };
4803  
4804 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4805 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4806         { /* These are from openssl */
4807                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4808                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4809 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
4810         },
4811  };
4812  
4813 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4814 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4815         { /* These are from openssl */
4816                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4817                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4818 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
4819         },
4820  };
4821  
4822 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4823 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4824         { /* Generated from openssl */
4825                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4826                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4827 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4828         },
4829  };
4830  
4831 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4832 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4833         { /* Generated from openssl */
4834                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4835                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4836 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4837         },
4838  };
4839  
4840 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4841 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4842         { /* Generated with Crypto++ */
4843                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4844                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4845 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4846         },
4847  };
4848  
4849 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4850 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4851         { /* Generated with Crypto++ */
4852                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4853                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4854 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4855  /*
4856   * Blowfish test vectors.
4857   */
4858 -#define BF_ENC_TEST_VECTORS    7
4859 -#define BF_DEC_TEST_VECTORS    7
4860 -#define BF_CBC_ENC_TEST_VECTORS        2
4861 -#define BF_CBC_DEC_TEST_VECTORS        2
4862 -#define BF_CTR_ENC_TEST_VECTORS        2
4863 -#define BF_CTR_DEC_TEST_VECTORS        2
4864 -
4865 -static struct cipher_testvec bf_enc_tv_template[] = {
4866 +static const struct cipher_testvec bf_enc_tv_template[] = {
4867         { /* DES test vectors from OpenSSL */
4868                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4869                 .klen   = 8,
4870 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_template[] = {
4871         },
4872  };
4873  
4874 -static struct cipher_testvec bf_dec_tv_template[] = {
4875 +static const struct cipher_testvec bf_dec_tv_template[] = {
4876         { /* DES test vectors from OpenSSL */
4877                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4878                 .klen   = 8,
4879 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_template[] = {
4880         },
4881  };
4882  
4883 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4884 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4885         { /* From OpenSSL */
4886                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4887                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4888 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4889         },
4890  };
4891  
4892 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4893 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4894         { /* From OpenSSL */
4895                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4896                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4897 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4898         },
4899  };
4900  
4901 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4902 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4903         { /* Generated with Crypto++ */
4904                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4905                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4906 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4907         },
4908  };
4909  
4910 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4911 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4912         { /* Generated with Crypto++ */
4913                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4914                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4915 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4916  /*
4917   * Twofish test vectors.
4918   */
4919 -#define TF_ENC_TEST_VECTORS            4
4920 -#define TF_DEC_TEST_VECTORS            4
4921 -#define TF_CBC_ENC_TEST_VECTORS                5
4922 -#define TF_CBC_DEC_TEST_VECTORS                5
4923 -#define TF_CTR_ENC_TEST_VECTORS                2
4924 -#define TF_CTR_DEC_TEST_VECTORS                2
4925 -#define TF_LRW_ENC_TEST_VECTORS                8
4926 -#define TF_LRW_DEC_TEST_VECTORS                8
4927 -#define TF_XTS_ENC_TEST_VECTORS                5
4928 -#define TF_XTS_DEC_TEST_VECTORS                5
4929 -
4930 -static struct cipher_testvec tf_enc_tv_template[] = {
4931 +static const struct cipher_testvec tf_enc_tv_template[] = {
4932         {
4933                 .key    = zeroed_string,
4934                 .klen   = 16,
4935 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_template[] = {
4936         },
4937  };
4938  
4939 -static struct cipher_testvec tf_dec_tv_template[] = {
4940 +static const struct cipher_testvec tf_dec_tv_template[] = {
4941         {
4942                 .key    = zeroed_string,
4943                 .klen   = 16,
4944 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_template[] = {
4945         },
4946  };
4947  
4948 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4949 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4950         { /* Generated with Nettle */
4951                 .key    = zeroed_string,
4952                 .klen   = 16,
4953 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4954         },
4955  };
4956  
4957 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4958 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4959         { /* Reverse of the first four above */
4960                 .key    = zeroed_string,
4961                 .klen   = 16,
4962 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4963         },
4964  };
4965  
4966 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4967 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4968         { /* Generated with Crypto++ */
4969                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4970                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4971 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4972         },
4973  };
4974  
4975 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4976 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4977         { /* Generated with Crypto++ */
4978                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4979                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4980 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4981         },
4982  };
4983  
4984 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4985 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4986         /* Generated from AES-LRW test vectors */
4987         {
4988                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4989 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4990         },
4991  };
4992  
4993 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4994 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
4995         /* Generated from AES-LRW test vectors */
4996         /* same as enc vectors with input and result reversed */
4997         {
4998 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4999         },
5000  };
5001  
5002 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5003 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5004         /* Generated from AES-XTS test vectors */
5005  {
5006                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5007 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
5008         },
5009  };
5010  
5011 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5012 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5013         /* Generated from AES-XTS test vectors */
5014         /* same as enc vectors with input and result reversed */
5015         {
5016 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
5017   * Serpent test vectors.  These are backwards because Serpent writes
5018   * octet sequences in right-to-left mode.
5019   */
5020 -#define SERPENT_ENC_TEST_VECTORS       5
5021 -#define SERPENT_DEC_TEST_VECTORS       5
5022 -
5023 -#define TNEPRES_ENC_TEST_VECTORS       4
5024 -#define TNEPRES_DEC_TEST_VECTORS       4
5025 -
5026 -#define SERPENT_CBC_ENC_TEST_VECTORS   1
5027 -#define SERPENT_CBC_DEC_TEST_VECTORS   1
5028 -
5029 -#define SERPENT_CTR_ENC_TEST_VECTORS   2
5030 -#define SERPENT_CTR_DEC_TEST_VECTORS   2
5031 -
5032 -#define SERPENT_LRW_ENC_TEST_VECTORS   8
5033 -#define SERPENT_LRW_DEC_TEST_VECTORS   8
5034 -
5035 -#define SERPENT_XTS_ENC_TEST_VECTORS   5
5036 -#define SERPENT_XTS_DEC_TEST_VECTORS   5
5037 -
5038 -static struct cipher_testvec serpent_enc_tv_template[] = {
5039 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5040         {
5041                 .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
5042                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5043 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
5044         },
5045  };
5046  
5047 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5048 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5049         { /* KeySize=128, PT=0, I=1 */
5050                 .input  = "\x00\x00\x00\x00\x00\x00\x00\x00"
5051                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5052 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc_tv_template[] = {
5053  };
5054  
5055  
5056 -static struct cipher_testvec serpent_dec_tv_template[] = {
5057 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5058         {
5059                 .input  = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5060                           "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5061 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
5062         },
5063  };
5064  
5065 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5066 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5067         {
5068                 .input  = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5069                           "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5070 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec_tv_template[] = {
5071         },
5072  };
5073  
5074 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5075 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5076         { /* Generated with Crypto++ */
5077                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5078                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5079 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5080         },
5081  };
5082  
5083 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5084 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5085         { /* Generated with Crypto++ */
5086                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5087                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5088 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5089         },
5090  };
5091  
5092 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5093 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5094         { /* Generated with Crypto++ */
5095                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5096                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5097 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5098         },
5099  };
5100  
5101 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5102 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5103         { /* Generated with Crypto++ */
5104                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5105                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5106 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5107         },
5108  };
5109  
5110 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5111 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5112         /* Generated from AES-LRW test vectors */
5113         {
5114                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5115 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5116         },
5117  };
5118  
5119 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5120 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5121         /* Generated from AES-LRW test vectors */
5122         /* same as enc vectors with input and result reversed */
5123         {
5124 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5125         },
5126  };
5127  
5128 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5129 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5130         /* Generated from AES-XTS test vectors */
5131         {
5132                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5133 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5134         },
5135  };
5136  
5137 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5138 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5139         /* Generated from AES-XTS test vectors */
5140         /* same as enc vectors with input and result reversed */
5141         {
5142 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5143  };
5144  
5145  /* Cast6 test vectors from RFC 2612 */
5146 -#define CAST6_ENC_TEST_VECTORS         4
5147 -#define CAST6_DEC_TEST_VECTORS         4
5148 -#define CAST6_CBC_ENC_TEST_VECTORS     1
5149 -#define CAST6_CBC_DEC_TEST_VECTORS     1
5150 -#define CAST6_CTR_ENC_TEST_VECTORS     2
5151 -#define CAST6_CTR_DEC_TEST_VECTORS     2
5152 -#define CAST6_LRW_ENC_TEST_VECTORS     1
5153 -#define CAST6_LRW_DEC_TEST_VECTORS     1
5154 -#define CAST6_XTS_ENC_TEST_VECTORS     1
5155 -#define CAST6_XTS_DEC_TEST_VECTORS     1
5156 -
5157 -static struct cipher_testvec cast6_enc_tv_template[] = {
5158 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5159         {
5160                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5161                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5162 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
5163         },
5164  };
5165  
5166 -static struct cipher_testvec cast6_dec_tv_template[] = {
5167 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5168         {
5169                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5170                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5171 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
5172         },
5173  };
5174  
5175 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5176 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5177         { /* Generated from TF test vectors */
5178                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5179                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5180 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5181         },
5182  };
5183  
5184 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5185 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5186         { /* Generated from TF test vectors */
5187                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5188                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5189 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5190         },
5191  };
5192  
5193 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5194 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5195         { /* Generated from TF test vectors */
5196                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5197                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5198 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5199         },
5200  };
5201  
5202 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5203 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5204         { /* Generated from TF test vectors */
5205                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5206                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5207 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5208         },
5209  };
5210  
5211 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5212 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5213         { /* Generated from TF test vectors */
5214                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5215                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5216 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5217         },
5218  };
5219  
5220 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5221 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5222         { /* Generated from TF test vectors */
5223                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5224                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5225 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5226         },
5227  };
5228  
5229 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5230 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5231         { /* Generated from TF test vectors */
5232                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5233                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5234 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5235         },
5236  };
5237  
5238 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5239 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5240         { /* Generated from TF test vectors */
5241                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5242                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5243 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5244  /*
5245   * AES test vectors.
5246   */
5247 -#define AES_ENC_TEST_VECTORS 4
5248 -#define AES_DEC_TEST_VECTORS 4
5249 -#define AES_CBC_ENC_TEST_VECTORS 5
5250 -#define AES_CBC_DEC_TEST_VECTORS 5
5251 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5252 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5253 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5254 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5255 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5256 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5257 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5258 -#define AES_LRW_ENC_TEST_VECTORS 8
5259 -#define AES_LRW_DEC_TEST_VECTORS 8
5260 -#define AES_XTS_ENC_TEST_VECTORS 5
5261 -#define AES_XTS_DEC_TEST_VECTORS 5
5262 -#define AES_CTR_ENC_TEST_VECTORS 5
5263 -#define AES_CTR_DEC_TEST_VECTORS 5
5264 -#define AES_OFB_ENC_TEST_VECTORS 1
5265 -#define AES_OFB_DEC_TEST_VECTORS 1
5266 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5267 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5268 -#define AES_GCM_ENC_TEST_VECTORS 9
5269 -#define AES_GCM_DEC_TEST_VECTORS 8
5270 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5271 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5272 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5273 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5274 -#define AES_CCM_ENC_TEST_VECTORS 8
5275 -#define AES_CCM_DEC_TEST_VECTORS 7
5276 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5277 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5278 -
5279 -static struct cipher_testvec aes_enc_tv_template[] = {
5280 +static const struct cipher_testvec aes_enc_tv_template[] = {
5281         { /* From FIPS-197 */
5282                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5283                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5284 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_template[] = {
5285         },
5286  };
5287  
5288 -static struct cipher_testvec aes_dec_tv_template[] = {
5289 +static const struct cipher_testvec aes_dec_tv_template[] = {
5290         { /* From FIPS-197 */
5291                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5292                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5293 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_template[] = {
5294         },
5295  };
5296  
5297 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5298 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5299         { /* From RFC 3602 */
5300                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5301                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5302 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5303         },
5304  };
5305  
5306 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5307 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5308         { /* From RFC 3602 */
5309                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5310                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5311 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5312         },
5313  };
5314  
5315 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5316 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5317         { /* Input data from RFC 2410 Case 1 */
5318  #ifdef __LITTLE_ENDIAN
5319                 .key    = "\x08\x00"            /* rta length */
5320 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5321         },
5322  };
5323  
5324 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5325 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5326         {
5327  #ifdef __LITTLE_ENDIAN
5328                 .key    = "\x08\x00"            /* rta length */
5329 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5330         },
5331  };
5332  
5333 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5334 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5335         { /* RFC 3602 Case 1 */
5336  #ifdef __LITTLE_ENDIAN
5337                 .key    = "\x08\x00"            /* rta length */
5338 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5339         },
5340  };
5341  
5342 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5343 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5344         { /* Input data from RFC 2410 Case 1 */
5345  #ifdef __LITTLE_ENDIAN
5346                 .key    = "\x08\x00"            /* rta length */
5347 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5348         },
5349  };
5350  
5351 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5352 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5353         {
5354  #ifdef __LITTLE_ENDIAN
5355                 .key    = "\x08\x00"            /* rta length */
5356 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5357         },
5358  };
5359  
5360 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5361 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5362         { /* RFC 3602 Case 1 */
5363  #ifdef __LITTLE_ENDIAN
5364                 .key    = "\x08\x00"            /* rta length */
5365 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5366         },
5367  };
5368  
5369 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5370 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5371         { /* RFC 3602 Case 1 */
5372  #ifdef __LITTLE_ENDIAN
5373                 .key    = "\x08\x00"            /* rta length */
5374 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5375         },
5376  };
5377  
5378 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5379 -
5380 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5381 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5382         { /*Generated with cryptopp*/
5383  #ifdef __LITTLE_ENDIAN
5384                 .key    = "\x08\x00"            /* rta length */
5385 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5386         },
5387  };
5388  
5389 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC       1
5390 -
5391 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5392 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5393         { /*Generated with cryptopp*/
5394  #ifdef __LITTLE_ENDIAN
5395                 .key    = "\x08\x00"            /* rta length */
5396 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5397         },
5398  };
5399  
5400 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC       1
5401 -
5402 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5403 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5404         { /*Generated with cryptopp*/
5405  #ifdef __LITTLE_ENDIAN
5406                 .key    = "\x08\x00"            /* rta length */
5407 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5408         },
5409  };
5410  
5411 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC       1
5412 -
5413 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5414 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5415         { /*Generated with cryptopp*/
5416  #ifdef __LITTLE_ENDIAN
5417                 .key    = "\x08\x00"            /* rta length */
5418 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5419         },
5420  };
5421  
5422 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC       1
5423 -
5424 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5425 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5426         { /*Generated with cryptopp*/
5427  #ifdef __LITTLE_ENDIAN
5428                 .key    = "\x08\x00"            /* rta length */
5429 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5430         },
5431  };
5432  
5433 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC    1
5434 -
5435 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5436 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5437         { /*Generated with cryptopp*/
5438  #ifdef __LITTLE_ENDIAN
5439                 .key    = "\x08\x00"            /* rta length */
5440 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5441         },
5442  };
5443  
5444 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC  1
5445 -
5446 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5447 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5448         { /*Generated with cryptopp*/
5449  #ifdef __LITTLE_ENDIAN
5450                 .key    = "\x08\x00"            /* rta length */
5451 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5452         },
5453  };
5454  
5455 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC  1
5456 -
5457 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5458 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5459         { /*Generated with cryptopp*/
5460  #ifdef __LITTLE_ENDIAN
5461                 .key    = "\x08\x00"            /* rta length */
5462 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5463         },
5464  };
5465  
5466 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC  1
5467 -
5468 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5469 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5470         { /*Generated with cryptopp*/
5471  #ifdef __LITTLE_ENDIAN
5472                 .key    = "\x08\x00"            /* rta length */
5473 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5474         },
5475  };
5476  
5477 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC  1
5478 -
5479 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5480 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5481         { /*Generated with cryptopp*/
5482  #ifdef __LITTLE_ENDIAN
5483                 .key    = "\x08\x00"            /* rta length */
5484 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5485         },
5486  };
5487  
5488 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5489 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5490         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5491         { /* LRW-32-AES 1 */
5492                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5493 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5494         }
5495  };
5496  
5497 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5498 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5499         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5500         /* same as enc vectors with input and result reversed */
5501         { /* LRW-32-AES 1 */
5502 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5503         }
5504  };
5505  
5506 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5507 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5508         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5509         { /* XTS-AES 1 */
5510                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5511 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
5512         }
5513  };
5514  
5515 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5516 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5517         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5518         { /* XTS-AES 1 */
5519                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5520 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
5521  };
5522  
5523  
5524 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5525 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5526         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5527                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5528                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5529 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5530         },
5531  };
5532  
5533 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5534 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5535         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5536                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5537                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5538 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5539         },
5540  };
5541  
5542 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5543 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5544         { /* From RFC 3686 */
5545                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5546                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5547 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5548         },
5549  };
5550  
5551 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5552 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5553         { /* From RFC 3686 */
5554                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5555                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5556 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5557         },
5558  };
5559  
5560 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5561 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5562          /* From NIST Special Publication 800-38A, Appendix F.5 */
5563         {
5564                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5565 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5566         }
5567  };
5568  
5569 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5570 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5571          /* From NIST Special Publication 800-38A, Appendix F.5 */
5572         {
5573                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5574 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5575         }
5576  };
5577  
5578 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5579 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5580         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5581                 .key    = zeroed_string,
5582                 .klen   = 16,
5583 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = {
5584         }
5585  };
5586  
5587 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5588 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5589         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5590                 .key    = zeroed_string,
5591                 .klen   = 32,
5592 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
5593         }
5594  };
5595  
5596 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5597 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5598         { /* Generated using Crypto++ */
5599                 .key    = zeroed_string,
5600                 .klen   = 20,
5601 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5602         }
5603  };
5604  
5605 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5606 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5607         { /* Generated using Crypto++ */
5608                 .key    = zeroed_string,
5609                 .klen   = 20,
5610 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5611         }
5612  };
5613  
5614 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5615 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5616         { /* From draft-mcgrew-gcm-test-01 */
5617                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5618                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5619 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5620         }
5621  };
5622  
5623 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5624 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5625         { /* From draft-mcgrew-gcm-test-01 */
5626                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5627                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5628 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5629         },
5630  };
5631  
5632 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5633 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5634         { /* From RFC 3610 */
5635                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5636                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5637 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
5638         }
5639  };
5640  
5641 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5642 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5643         { /* From RFC 3610 */
5644                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5645                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5646 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = {
5647   * These vectors are copied/generated from the ones for rfc4106 with
5648   * the key truncated by one byte..
5649   */
5650 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5651 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5652         { /* Generated using Crypto++ */
5653                 .key    = zeroed_string,
5654                 .klen   = 19,
5655 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5656         }
5657  };
5658  
5659 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]   = {
5660 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]     = {
5661         { /* Generated using Crypto++ */
5662                 .key    = zeroed_string,
5663                 .klen   = 19,
5664 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]    = {
5665  /*
5666   * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5667   */
5668 -#define RFC7539_ENC_TEST_VECTORS 2
5669 -#define RFC7539_DEC_TEST_VECTORS 2
5670 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5671 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5672         {
5673                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5674                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5675 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_tv_template[] = {
5676         },
5677  };
5678  
5679 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5680 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5681         {
5682                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5683                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5684 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_tv_template[] = {
5685  /*
5686   * draft-irtf-cfrg-chacha20-poly1305
5687   */
5688 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5689 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5690 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5691 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5692         {
5693                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5694                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5695 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5696         },
5697  };
5698  
5699 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5700 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5701         {
5702                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5703                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5704 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5705   * semiblock of the ciphertext from the test vector. For decryption, iv is
5706   * the first semiblock of the ciphertext.
5707   */
5708 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5709 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5710         {
5711                 .key    = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5712                           "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5713 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_tv_template[] = {
5714         },
5715  };
5716  
5717 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5718 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5719         {
5720                 .key    = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5721                           "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5722 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_tv_template[] = {
5723   *     http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5724   * Only AES-128 is supported at this time.
5725   */
5726 -#define ANSI_CPRNG_AES_TEST_VECTORS    6
5727 -
5728 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5729 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5730         {
5731                 .key    = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5732                           "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5733 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5734   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5735   * w/o personalization string, w/ and w/o additional input string).
5736   */
5737 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5738 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5739         {
5740                 .entropy = (unsigned char *)
5741                         "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5742 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5743         },
5744  };
5745  
5746 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5747 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5748         {
5749                 .entropy = (unsigned char *)
5750                         "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5751 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5752         },
5753  };
5754  
5755 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5756 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5757         {
5758                 .entropy = (unsigned char *)
5759                         "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5760 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5761   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5762   * w/o personalization string, w/ and w/o additional input string).
5763   */
5764 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5765 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5766         {
5767                 .entropy = (unsigned char *)
5768                         "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5769 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5770         },
5771  };
5772  
5773 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5774 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5775         {
5776                 .entropy = (unsigned char *)
5777                         "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5778 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5779         },
5780  };
5781  
5782 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5783 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5784         {
5785                 .entropy = (unsigned char *)
5786                         "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5787 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5788         },
5789  };
5790  
5791 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5792 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5793         {
5794                 .entropy = (unsigned char *)
5795                         "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5796 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5797         },
5798  };
5799  
5800 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5801 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5802         {
5803                 .entropy = (unsigned char *)
5804                         "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5805 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5806  };
5807  
5808  /* Cast5 test vectors from RFC 2144 */
5809 -#define CAST5_ENC_TEST_VECTORS         4
5810 -#define CAST5_DEC_TEST_VECTORS         4
5811 -#define CAST5_CBC_ENC_TEST_VECTORS     1
5812 -#define CAST5_CBC_DEC_TEST_VECTORS     1
5813 -#define CAST5_CTR_ENC_TEST_VECTORS     2
5814 -#define CAST5_CTR_DEC_TEST_VECTORS     2
5815 -
5816 -static struct cipher_testvec cast5_enc_tv_template[] = {
5817 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5818         {
5819                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5820                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5821 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
5822         },
5823  };
5824  
5825 -static struct cipher_testvec cast5_dec_tv_template[] = {
5826 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5827         {
5828                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5829                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5830 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
5831         },
5832  };
5833  
5834 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5835 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5836         { /* Generated from TF test vectors */
5837                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5838                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5839 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5840         },
5841  };
5842  
5843 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5844 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5845         { /* Generated from TF test vectors */
5846                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5847                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5848 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5849         },
5850  };
5851  
5852 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5853 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5854         { /* Generated from TF test vectors */
5855                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5856                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5857 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5858         },
5859  };
5860  
5861 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5862 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5863         { /* Generated from TF test vectors */
5864                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5865                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5866 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5867  /*
5868   * ARC4 test vectors from OpenSSL
5869   */
5870 -#define ARC4_ENC_TEST_VECTORS  7
5871 -#define ARC4_DEC_TEST_VECTORS  7
5872 -
5873 -static struct cipher_testvec arc4_enc_tv_template[] = {
5874 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5875         {
5876                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5877                 .klen   = 8,
5878 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv_template[] = {
5879         },
5880  };
5881  
5882 -static struct cipher_testvec arc4_dec_tv_template[] = {
5883 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5884         {
5885                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5886                 .klen   = 8,
5887 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv_template[] = {
5888  /*
5889   * TEA test vectors
5890   */
5891 -#define TEA_ENC_TEST_VECTORS   4
5892 -#define TEA_DEC_TEST_VECTORS   4
5893 -
5894 -static struct cipher_testvec tea_enc_tv_template[] = {
5895 +static const struct cipher_testvec tea_enc_tv_template[] = {
5896         {
5897                 .key    = zeroed_string,
5898                 .klen   = 16,
5899 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_template[] = {
5900         }
5901  };
5902  
5903 -static struct cipher_testvec tea_dec_tv_template[] = {
5904 +static const struct cipher_testvec tea_dec_tv_template[] = {
5905         {
5906                 .key    = zeroed_string,
5907                 .klen   = 16,
5908 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_template[] = {
5909  /*
5910   * XTEA test vectors
5911   */
5912 -#define XTEA_ENC_TEST_VECTORS  4
5913 -#define XTEA_DEC_TEST_VECTORS  4
5914 -
5915 -static struct cipher_testvec xtea_enc_tv_template[] = {
5916 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5917         {
5918                 .key    = zeroed_string,
5919                 .klen   = 16,
5920 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
5921         }
5922  };
5923  
5924 -static struct cipher_testvec xtea_dec_tv_template[] = {
5925 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5926         {
5927                 .key    = zeroed_string,
5928                 .klen   = 16,
5929 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
5930  /*
5931   * KHAZAD test vectors.
5932   */
5933 -#define KHAZAD_ENC_TEST_VECTORS 5
5934 -#define KHAZAD_DEC_TEST_VECTORS 5
5935 -
5936 -static struct cipher_testvec khazad_enc_tv_template[] = {
5937 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5938         {
5939                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5940                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5941 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_tv_template[] = {
5942         },
5943  };
5944  
5945 -static struct cipher_testvec khazad_dec_tv_template[] = {
5946 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5947         {
5948                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5949                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5950 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_tv_template[] = {
5951   * Anubis test vectors.
5952   */
5953  
5954 -#define ANUBIS_ENC_TEST_VECTORS                        5
5955 -#define ANUBIS_DEC_TEST_VECTORS                        5
5956 -#define ANUBIS_CBC_ENC_TEST_VECTORS            2
5957 -#define ANUBIS_CBC_DEC_TEST_VECTORS            2
5958 -
5959 -static struct cipher_testvec anubis_enc_tv_template[] = {
5960 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5961         {
5962                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5963                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5964 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_tv_template[] = {
5965         },
5966  };
5967  
5968 -static struct cipher_testvec anubis_dec_tv_template[] = {
5969 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5970         {
5971                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5972                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5973 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_tv_template[] = {
5974         },
5975  };
5976  
5977 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5978 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5979         {
5980                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5981                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5982 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5983         },
5984  };
5985  
5986 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5987 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5988         {
5989                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5990                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5991 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5992  /*
5993   * XETA test vectors
5994   */
5995 -#define XETA_ENC_TEST_VECTORS  4
5996 -#define XETA_DEC_TEST_VECTORS  4
5997 -
5998 -static struct cipher_testvec xeta_enc_tv_template[] = {
5999 +static const struct cipher_testvec xeta_enc_tv_template[] = {
6000         {
6001                 .key    = zeroed_string,
6002                 .klen   = 16,
6003 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv_template[] = {
6004         }
6005  };
6006  
6007 -static struct cipher_testvec xeta_dec_tv_template[] = {
6008 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6009         {
6010                 .key    = zeroed_string,
6011                 .klen   = 16,
6012 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
6013  /*
6014   * FCrypt test vectors
6015   */
6016 -#define FCRYPT_ENC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6017 -#define FCRYPT_DEC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6018 -
6019 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6020 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6021         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6022                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6023                 .klen   = 8,
6024 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6025         }
6026  };
6027  
6028 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6029 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6030         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6031                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6032                 .klen   = 8,
6033 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6034  /*
6035   * CAMELLIA test vectors.
6036   */
6037 -#define CAMELLIA_ENC_TEST_VECTORS 4
6038 -#define CAMELLIA_DEC_TEST_VECTORS 4
6039 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6040 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6041 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6042 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6043 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6044 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6045 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6046 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6047 -
6048 -static struct cipher_testvec camellia_enc_tv_template[] = {
6049 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6050         {
6051                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6052                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6053 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
6054         },
6055  };
6056  
6057 -static struct cipher_testvec camellia_dec_tv_template[] = {
6058 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6059         {
6060                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6061                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6062 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
6063         },
6064  };
6065  
6066 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6067 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6068         {
6069                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6070                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6071 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6072         },
6073  };
6074  
6075 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6076 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6077         {
6078                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6079                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6080 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6081         },
6082  };
6083  
6084 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6085 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6086         { /* Generated with Crypto++ */
6087                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6088                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6089 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6090         },
6091  };
6092  
6093 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6094 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6095         { /* Generated with Crypto++ */
6096                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6097                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6098 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6099         },
6100  };
6101  
6102 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6103 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6104         /* Generated from AES-LRW test vectors */
6105         {
6106                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6107 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6108         },
6109  };
6110  
6111 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6112 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6113         /* Generated from AES-LRW test vectors */
6114         /* same as enc vectors with input and result reversed */
6115         {
6116 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6117         },
6118  };
6119  
6120 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6121 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6122         /* Generated from AES-XTS test vectors */
6123         {
6124                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6125 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6126         },
6127  };
6128  
6129 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6130 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6131         /* Generated from AES-XTS test vectors */
6132         /* same as enc vectors with input and result reversed */
6133         {
6134 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6135  /*
6136   * SEED test vectors
6137   */
6138 -#define SEED_ENC_TEST_VECTORS  4
6139 -#define SEED_DEC_TEST_VECTORS  4
6140 -
6141 -static struct cipher_testvec seed_enc_tv_template[] = {
6142 +static const struct cipher_testvec seed_enc_tv_template[] = {
6143         {
6144                 .key    = zeroed_string,
6145                 .klen   = 16,
6146 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv_template[] = {
6147         }
6148  };
6149  
6150 -static struct cipher_testvec seed_dec_tv_template[] = {
6151 +static const struct cipher_testvec seed_dec_tv_template[] = {
6152         {
6153                 .key    = zeroed_string,
6154                 .klen   = 16,
6155 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv_template[] = {
6156         }
6157  };
6158  
6159 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6160 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6161 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6162         /*
6163         * Testvectors from verified.test-vectors submitted to ECRYPT.
6164         * They are truncated to size 39, 64, 111, 129 to test a variety
6165 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6166         },
6167  };
6168  
6169 -#define CHACHA20_ENC_TEST_VECTORS 4
6170 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6171 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6172         { /* RFC7539 A.2. Test Vector #1 */
6173                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6174                           "\x00\x00\x00\x00\x00\x00\x00\x00"
6175 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
6176  /*
6177   * CTS (Cipher Text Stealing) mode tests
6178   */
6179 -#define CTS_MODE_ENC_TEST_VECTORS 6
6180 -#define CTS_MODE_DEC_TEST_VECTORS 6
6181 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6182 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6183         { /* from rfc3962 */
6184                 .klen   = 16,
6185                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6186 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_enc_tv_template[] = {
6187         }
6188  };
6189  
6190 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6191 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6192         { /* from rfc3962 */
6193                 .klen   = 16,
6194                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6195 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6196   * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6197   */
6198  
6199 -#define DEFLATE_COMP_TEST_VECTORS 2
6200 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6201 -
6202 -static struct comp_testvec deflate_comp_tv_template[] = {
6203 +static const struct comp_testvec deflate_comp_tv_template[] = {
6204         {
6205                 .inlen  = 70,
6206                 .outlen = 38,
6207 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_tv_template[] = {
6208         },
6209  };
6210  
6211 -static struct comp_testvec deflate_decomp_tv_template[] = {
6212 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6213         {
6214                 .inlen  = 122,
6215                 .outlen = 191,
6216 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
6217  /*
6218   * LZO test vectors (null-terminated strings).
6219   */
6220 -#define LZO_COMP_TEST_VECTORS 2
6221 -#define LZO_DECOMP_TEST_VECTORS 2
6222 -
6223 -static struct comp_testvec lzo_comp_tv_template[] = {
6224 +static const struct comp_testvec lzo_comp_tv_template[] = {
6225         {
6226                 .inlen  = 70,
6227                 .outlen = 57,
6228 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_template[] = {
6229         },
6230  };
6231  
6232 -static struct comp_testvec lzo_decomp_tv_template[] = {
6233 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6234         {
6235                 .inlen  = 133,
6236                 .outlen = 159,
6237 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv_template[] = {
6238   */
6239  #define MICHAEL_MIC_TEST_VECTORS 6
6240  
6241 -static struct hash_testvec michael_mic_tv_template[] = {
6242 +static const struct hash_testvec michael_mic_tv_template[] = {
6243         {
6244                 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6245                 .ksize = 8,
6246 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_tv_template[] = {
6247  /*
6248   * CRC32 test vectors
6249   */
6250 -#define CRC32_TEST_VECTORS 14
6251 -
6252 -static struct hash_testvec crc32_tv_template[] = {
6253 +static const struct hash_testvec crc32_tv_template[] = {
6254         {
6255                 .key = "\x87\xa9\xcb\xed",
6256                 .ksize = 4,
6257 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_template[] = {
6258  /*
6259   * CRC32C test vectors
6260   */
6261 -#define CRC32C_TEST_VECTORS 15
6262 -
6263 -static struct hash_testvec crc32c_tv_template[] = {
6264 +static const struct hash_testvec crc32c_tv_template[] = {
6265         {
6266                 .psize = 0,
6267                 .digest = "\x00\x00\x00\x00",
6268 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_template[] = {
6269  /*
6270   * Blackfin CRC test vectors
6271   */
6272 -#define BFIN_CRC_TEST_VECTORS 6
6273 -
6274 -static struct hash_testvec bfin_crc_tv_template[] = {
6275 +static const struct hash_testvec bfin_crc_tv_template[] = {
6276         {
6277                 .psize = 0,
6278                 .digest = "\x00\x00\x00\x00",
6279 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_template[] = {
6280  
6281  };
6282  
6283 -#define LZ4_COMP_TEST_VECTORS 1
6284 -#define LZ4_DECOMP_TEST_VECTORS 1
6285 -
6286  static struct comp_testvec lz4_comp_tv_template[] = {
6287         {
6288                 .inlen  = 70,
6289 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv_template[] = {
6290         },
6291  };
6292  
6293 -#define LZ4HC_COMP_TEST_VECTORS 1
6294 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6295 -
6296  static struct comp_testvec lz4hc_comp_tv_template[] = {
6297         {
6298                 .inlen  = 70,
6299 diff --git a/crypto/tls.c b/crypto/tls.c
6300 new file mode 100644
6301 index 00000000..377226f5
6302 --- /dev/null
6303 +++ b/crypto/tls.c
6304 @@ -0,0 +1,607 @@
6305 +/*
6306 + * Copyright 2013 Freescale Semiconductor, Inc.
6307 + * Copyright 2017 NXP Semiconductor, Inc.
6308 + *
6309 + * This program is free software; you can redistribute it and/or modify it
6310 + * under the terms of the GNU General Public License as published by the Free
6311 + * Software Foundation; either version 2 of the License, or (at your option)
6312 + * any later version.
6313 + *
6314 + */
6315 +
6316 +#include <crypto/internal/aead.h>
6317 +#include <crypto/internal/hash.h>
6318 +#include <crypto/internal/skcipher.h>
6319 +#include <crypto/authenc.h>
6320 +#include <crypto/null.h>
6321 +#include <crypto/scatterwalk.h>
6322 +#include <linux/err.h>
6323 +#include <linux/init.h>
6324 +#include <linux/module.h>
6325 +#include <linux/rtnetlink.h>
6326 +
6327 +struct tls_instance_ctx {
6328 +       struct crypto_ahash_spawn auth;
6329 +       struct crypto_skcipher_spawn enc;
6330 +};
6331 +
6332 +struct crypto_tls_ctx {
6333 +       unsigned int reqoff;
6334 +       struct crypto_ahash *auth;
6335 +       struct crypto_skcipher *enc;
6336 +       struct crypto_skcipher *null;
6337 +};
6338 +
6339 +struct tls_request_ctx {
6340 +       /*
6341 +        * cryptlen holds the payload length in the case of encryption or
6342 +        * payload_len + icv_len + padding_len in case of decryption
6343 +        */
6344 +       unsigned int cryptlen;
6345 +       /* working space for partial results */
6346 +       struct scatterlist tmp[2];
6347 +       struct scatterlist cipher[2];
6348 +       struct scatterlist dst[2];
6349 +       char tail[];
6350 +};
6351 +
6352 +struct async_op {
6353 +       struct completion completion;
6354 +       int err;
6355 +};
6356 +
6357 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6358 +{
6359 +       struct async_op *areq = req->data;
6360 +
6361 +       if (err == -EINPROGRESS)
6362 +               return;
6363 +
6364 +       areq->err = err;
6365 +       complete(&areq->completion);
6366 +}
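
tls_async_op_done() pairs with wait_for_completion_interruptible() further down to drive asynchronous sub-requests synchronously; later kernels wrap the same pattern in crypto_wait_req()/DECLARE_CRYPTO_WAIT. A minimal sketch of the wait side (illustrative only, not part of the patch; assumes the struct async_op defined above):

/* Sketch: synchronously wait on a request completed via tls_async_op_done() */
static int tls_wait_async_op(struct async_op *op, int err)
{
	if (err == -EINPROGRESS) {
		err = wait_for_completion_interruptible(&op->completion);
		if (!err)
			err = op->err;
	}
	return err;
}

crypto_tls_genicv() and crypto_tls_decrypt() below open-code exactly this pattern.
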
6367 +
6368 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6369 +                            unsigned int keylen)
6370 +{
6371 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6372 +       struct crypto_ahash *auth = ctx->auth;
6373 +       struct crypto_skcipher *enc = ctx->enc;
6374 +       struct crypto_authenc_keys keys;
6375 +       int err = -EINVAL;
6376 +
6377 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6378 +               goto badkey;
6379 +
6380 +       crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6381 +       crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6382 +                                   CRYPTO_TFM_REQ_MASK);
6383 +       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6384 +       crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6385 +                                      CRYPTO_TFM_RES_MASK);
6386 +
6387 +       if (err)
6388 +               goto out;
6389 +
6390 +       crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6391 +       crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6392 +                                        CRYPTO_TFM_REQ_MASK);
6393 +       err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6394 +       crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6395 +                                      CRYPTO_TFM_RES_MASK);
6396 +
6397 +out:
6398 +       return err;
6399 +
6400 +badkey:
6401 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6402 +       goto out;
6403 +}
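
crypto_tls_setkey() accepts the same packed key format as the authenc() template: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption key length, followed by the authentication key and then the encryption key. A minimal sketch of building such a blob (tls_pack_key is a hypothetical helper, not part of the patch; buf is assumed large enough):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

/* Sketch: pack authkey/enckey into the blob crypto_tls_setkey() parses.
 * Layout: rtattr(CRYPTO_AUTHENC_KEYA_PARAM){be32 enckeylen} || authkey || enckey
 */
static unsigned int tls_pack_key(u8 *buf, const u8 *authkey,
				 unsigned int authkeylen,
				 const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param = RTA_DATA(rta);

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}
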
6404 +
6405 +/**
6406 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6407 + * @hash:      (output) buffer to save the digest into
6408 + * @src:       (input) scatterlist with the assoc and payload data
6409 + * @srclen:    (input) size of the source buffer (assoclen + cryptlen)
6410 + * @req:       (input) aead request
6411 + **/
6412 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6413 +                            unsigned int srclen, struct aead_request *req)
6414 +{
6415 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6416 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6417 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6418 +       struct async_op ahash_op;
6419 +       struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6420 +       unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6421 +       int err = -EBADMSG;
6422 +
6423 +       /* Bail out if the request assoc len is 0 */
6424 +       if (!req->assoclen)
6425 +               return err;
6426 +
6427 +       init_completion(&ahash_op.completion);
6428 +
6429 +       /* the hash transform to be executed comes from the original request */
6430 +       ahash_request_set_tfm(ahreq, ctx->auth);
6431 +       /* prepare the hash request with input data and result pointer */
6432 +       ahash_request_set_crypt(ahreq, src, hash, srclen);
6433 +       /* set the notifier for when the async hash function returns */
6434 +       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6435 +                                  tls_async_op_done, &ahash_op);
6436 +
6437 +       /* Calculate the digest on the given data. The result is put in hash */
6438 +       err = crypto_ahash_digest(ahreq);
6439 +       if (err == -EINPROGRESS) {
6440 +               err = wait_for_completion_interruptible(&ahash_op.completion);
6441 +               if (!err)
6442 +                       err = ahash_op.err;
6443 +       }
6444 +
6445 +       return err;
6446 +}
6447 +
6448 +/**
6449 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6450 + * @hash:      (output) buffer to save the digest and padding into
6451 + * @phashlen:  (output) the size of digest + padding
6452 + * @req:       (input) aead request
6453 + **/
6454 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6455 +                                struct aead_request *req)
6456 +{
6457 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6458 +       unsigned int hash_size = crypto_aead_authsize(tls);
6459 +       unsigned int block_size = crypto_aead_blocksize(tls);
6460 +       unsigned int srclen = req->cryptlen + hash_size;
6461 +       unsigned int icvlen = req->cryptlen + req->assoclen;
6462 +       unsigned int padlen;
6463 +       int err;
6464 +
6465 +       err = crypto_tls_genicv(hash, req->src, icvlen, req);
6466 +       if (err)
6467 +               goto out;
6468 +
6469 +       /* add padding after digest */
6470 +       padlen = block_size - (srclen % block_size);
6471 +       memset(hash + hash_size, padlen - 1, padlen);
6472 +
6473 +       *phashlen = hash_size + padlen;
6474 +out:
6475 +       return err;
6476 +}
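
The padding written above follows TLS 1.0 CBC rules: the plaintext record (payload, MAC, padding) must be a whole number of cipher blocks, and each of the padlen trailing bytes, including the final length byte, carries the value padlen - 1. For example, with a 32-byte payload, SHA-1 HMAC (20 bytes) and AES (16-byte blocks): srclen = 52, padlen = 16 - 52 % 16 = 12, so twelve 0x0b bytes are appended and the record grows to 64 bytes. The same arithmetic in isolation (illustrative only):

/* Sketch: TLS 1.0 pad length. Always 1..block_size, so the length byte
 * is always present and srclen + padlen is block-aligned. */
static unsigned int tls_pad_len(unsigned int payload_len,
				unsigned int hash_size,
				unsigned int block_size)
{
	return block_size - (payload_len + hash_size) % block_size;
}
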
6477 +
6478 +static int crypto_tls_copy_data(struct aead_request *req,
6479 +                               struct scatterlist *src,
6480 +                               struct scatterlist *dst,
6481 +                               unsigned int len)
6482 +{
6483 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6484 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6485 +       SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6486 +
6487 +       skcipher_request_set_tfm(skreq, ctx->null);
6488 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6489 +                                     NULL, NULL);
6490 +       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6491 +
6492 +       return crypto_skcipher_encrypt(skreq);
6493 +}
6494 +
6495 +static int crypto_tls_encrypt(struct aead_request *req)
6496 +{
6497 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6498 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6499 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6500 +       struct skcipher_request *skreq;
6501 +       struct scatterlist *cipher = treq_ctx->cipher;
6502 +       struct scatterlist *tmp = treq_ctx->tmp;
6503 +       struct scatterlist *sg, *src, *dst;
6504 +       unsigned int cryptlen, phashlen;
6505 +       u8 *hash = treq_ctx->tail;
6506 +       int err;
6507 +
6508 +       /*
6509 +        * The hash result is saved at the beginning of the tls request ctx
6510 +        * and is aligned as required by the hash transform. Enough space was
6511 +        * allocated in crypto_tls_init_tfm to accommodate the difference. The
6512 +        * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6513 +        * the result is not overwritten by the second (cipher) request.
6514 +        */
6515 +       hash = (u8 *)ALIGN((unsigned long)hash +
6516 +                          crypto_ahash_alignmask(ctx->auth),
6517 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6518 +
6519 +       /*
6520 +        * STEP 1: create ICV together with necessary padding
6521 +        */
6522 +       err = crypto_tls_gen_padicv(hash, &phashlen, req);
6523 +       if (err)
6524 +               return err;
6525 +
6526 +       /*
6527 +        * STEP 2: Hash and padding are combined with the payload
6528 +        * depending on the form in which it arrives. Scatter tables must have at least
6529 +        * one page of data before chaining with another table and can't have
6530 +        * an empty data page. The following code addresses these requirements.
6531 +        *
6532 +        * If the payload is empty, only the hash is encrypted, otherwise the
6533 +        * payload scatterlist is merged with the hash. A special merging case
6534 +        * is when the payload has only one page of data. In that case the
6535 +        * payload page is moved to another scatterlist and prepared there for
6536 +        * encryption.
6537 +        */
6538 +       if (req->cryptlen) {
6539 +               src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6540 +
6541 +               sg_init_table(cipher, 2);
6542 +               sg_set_buf(cipher + 1, hash, phashlen);
6543 +
6544 +               if (sg_is_last(src)) {
6545 +                       sg_set_page(cipher, sg_page(src), req->cryptlen,
6546 +                                   src->offset);
6547 +                       src = cipher;
6548 +               } else {
6549 +                       unsigned int rem_len = req->cryptlen;
6550 +
6551 +                       for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6552 +                               rem_len -= min(rem_len, sg->length);
6553 +
6554 +                       sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6555 +                       sg_chain(sg, 1, cipher);
6556 +               }
6557 +       } else {
6558 +               sg_init_one(cipher, hash, phashlen);
6559 +               src = cipher;
6560 +       }
6561 +
6562 +       /*
6563 +        * If src != dst, copy the associated data from source to destination.
6564 +        * In both cases, fast-forward past the associated data in the dest.
6565 +        */
6566 +       if (req->src != req->dst) {
6567 +               err = crypto_tls_copy_data(req, req->src, req->dst,
6568 +                                          req->assoclen);
6569 +               if (err)
6570 +                       return err;
6571 +       }
6572 +       dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6573 +
6574 +       /*
6575 +        * STEP 3: encrypt the frame and return the result
6576 +        */
6577 +       cryptlen = req->cryptlen + phashlen;
6578 +
6579 +       /*
6580 +        * The hash and the cipher are applied at different times and their
6581 +        * requests can use the same memory space without interference
6582 +        */
6583 +       skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6584 +       skcipher_request_set_tfm(skreq, ctx->enc);
6585 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6586 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6587 +                                     req->base.complete, req->base.data);
6588 +       /*
6589 +        * Apply the cipher transform. The result will be in req->dst when the
6590 +        * asynchronous call terminates.
6591 +        */
6592 +       err = crypto_skcipher_encrypt(skreq);
6593 +
6594 +       return err;
6595 +}
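
The scatterlist merging in STEP 2 above hides a subtlety: sg_chain(sg, 1, cipher) turns the entry at sg itself into a chain link, which is why that entry's page, length and offset are first re-described in cipher[0]. A standalone sketch of the splice (illustrative only):

#include <linux/scatterlist.h>

/* Sketch: append an in-context buffer after the last payload entry.
 * 'last' is the payload entry about to be overwritten by the chain
 * link; 'rem_len' is the payload it still describes.
 */
static void tls_splice_tail(struct scatterlist cipher[2],
			    struct scatterlist *last, unsigned int rem_len,
			    void *buf, unsigned int buflen)
{
	sg_init_table(cipher, 2);
	sg_set_page(cipher, sg_page(last), rem_len, last->offset); /* rescued payload */
	sg_set_buf(cipher + 1, buf, buflen);                       /* hash + padding */
	sg_chain(last, 1, cipher);                /* the walk now continues in cipher[] */
}
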
6596 +
6597 +static int crypto_tls_decrypt(struct aead_request *req)
6598 +{
6599 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6600 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6601 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6602 +       unsigned int cryptlen = req->cryptlen;
6603 +       unsigned int hash_size = crypto_aead_authsize(tls);
6604 +       unsigned int block_size = crypto_aead_blocksize(tls);
6605 +       struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6606 +       struct scatterlist *tmp = treq_ctx->tmp;
6607 +       struct scatterlist *src, *dst;
6608 +
6609 +       u8 padding[255]; /* padding can be 0-255 bytes */
6610 +       u8 pad_size;
6611 +       u16 *len_field;
6612 +       u8 *ihash, *hash = treq_ctx->tail;
6613 +
6614 +       int paderr = 0;
6615 +       int err = -EINVAL;
6616 +       int i;
6617 +       struct async_op ciph_op;
6618 +
6619 +       /*
6620 +        * Rule out bad packets. The input must be at least one byte longer
6621 +        * than the hash and a whole multiple of the cipher block size.
6622 +        */
6623 +       if (cryptlen <= hash_size || cryptlen % block_size)
6624 +               goto out;
6625 +
6626 +       /*
6627 +        * Step 1 - Decrypt the source. Fast-forward past the associated data
6628 +        * to the encrypted data. The result will be overwritten in place so
6629 +        * that the decrypted data will be adjacent to the associated data. The
6630 +        * last step (computing the hash) will have its input data already
6631 +        * prepared and ready to be accessed at req->src.
6632 +        */
6633 +       src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6634 +       dst = src;
6635 +
6636 +       init_completion(&ciph_op.completion);
6637 +       skcipher_request_set_tfm(skreq, ctx->enc);
6638 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6639 +                                     tls_async_op_done, &ciph_op);
6640 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6641 +       err = crypto_skcipher_decrypt(skreq);
6642 +       if (err == -EINPROGRESS) {
6643 +               err = wait_for_completion_interruptible(&ciph_op.completion);
6644 +               if (!err)
6645 +                       err = ciph_op.err;
6646 +       }
6647 +       if (err)
6648 +               goto out;
6649 +
6650 +       /*
6651 +        * Step 2 - Verify padding
6652 +        * Retrieve the last byte of the payload; this is the padding size.
6653 +        */
6654 +       cryptlen -= 1;
6655 +       scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6656 +
6657 +       /* RFC-recommended handling of an impossible (too large) padding size */
6658 +       if (cryptlen < pad_size + hash_size) {
6659 +               pad_size = 0;
6660 +               paderr = -EBADMSG;
6661 +       }
6662 +       cryptlen -= pad_size;
6663 +       scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6664 +
6665 +       /* Every padding byte must equal pad_size. We verify all of them */
6666 +       for (i = 0; i < pad_size; i++)
6667 +               if (padding[i] != pad_size)
6668 +                       paderr = -EBADMSG;
6669 +
6670 +       /*
6671 +        * Step 3 - Verify hash
6672 +        * Align the digest result as required by the hash transform. Enough
6673 +        * space was allocated in crypto_tls_init_tfm
6674 +        */
6675 +       hash = (u8 *)ALIGN((unsigned long)hash +
6676 +                          crypto_ahash_alignmask(ctx->auth),
6677 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6678 +       /*
6679 +        * The last two bytes of the associated data form the length field.
6680 +        * It must be updated with the length of the cleartext message before
6681 +        * the hash is calculated.
6682 +        */
6683 +       len_field = sg_virt(req->src) + req->assoclen - 2;
6684 +       cryptlen -= hash_size;
6685 +       *len_field = htons(cryptlen);
6686 +
6687 +       /* This is the hash from the decrypted packet. Save it for later */
6688 +       ihash = hash + hash_size;
6689 +       scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6690 +
6691 +       /* Now compute and compare our ICV with the one from the packet */
6692 +       err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6693 +       if (!err)
6694 +               err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6695 +
6696 +       if (req->src != req->dst) {
6697 +               err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6698 +                                          req->assoclen);
6699 +               if (err)
6700 +                       goto out;
6701 +       }
6702 +
6703 +       /* return the first found error */
6704 +       if (paderr)
6705 +               err = paderr;
6706 +
6707 +out:
6708 +       aead_request_complete(req, err);
6709 +       return err;
6710 +}
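
Note that the padding check above inspects every padding byte and only records the first error in paderr while still computing and comparing the MAC, rather than bailing out at the first mismatch; this narrows (though does not fully close) the timing side channel exploited by padding-oracle attacks on TLS CBC. The check in isolation (illustrative only):

/* Sketch: each of the pad_size bytes preceding the length byte must
 * itself equal pad_size; the loop deliberately never exits early. */
static int tls_check_pad(const u8 *padding, u8 pad_size)
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < pad_size; i++)
		if (padding[i] != pad_size)
			err = -EBADMSG;

	return err;
}
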
6711 +
6712 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6713 +{
6714 +       struct aead_instance *inst = aead_alg_instance(tfm);
6715 +       struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6716 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6717 +       struct crypto_ahash *auth;
6718 +       struct crypto_skcipher *enc;
6719 +       struct crypto_skcipher *null;
6720 +       int err;
6721 +
6722 +       auth = crypto_spawn_ahash(&ictx->auth);
6723 +       if (IS_ERR(auth))
6724 +               return PTR_ERR(auth);
6725 +
6726 +       enc = crypto_spawn_skcipher(&ictx->enc);
6727 +       err = PTR_ERR(enc);
6728 +       if (IS_ERR(enc))
6729 +               goto err_free_ahash;
6730 +
6731 +       null = crypto_get_default_null_skcipher2();
6732 +       err = PTR_ERR(null);
6733 +       if (IS_ERR(null))
6734 +               goto err_free_skcipher;
6735 +
6736 +       ctx->auth = auth;
6737 +       ctx->enc = enc;
6738 +       ctx->null = null;
6739 +
6740 +       /*
6741 +        * Allow enough space for two digests. The two digests will be compared
6742 +        * during the decryption phase. One will come from the decrypted packet
6743 +        * and the other will be calculated. For encryption, one digest is
6744 +        * padded (up to a cipher blocksize) and chained with the payload
6745 +        */
6746 +       ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6747 +                           crypto_ahash_alignmask(auth),
6748 +                           crypto_ahash_alignmask(auth) + 1) +
6749 +                           max(crypto_ahash_digestsize(auth),
6750 +                               crypto_skcipher_blocksize(enc));
6751 +
6752 +       crypto_aead_set_reqsize(tfm,
6753 +                               sizeof(struct tls_request_ctx) +
6754 +                               ctx->reqoff +
6755 +                               max_t(unsigned int,
6756 +                                     crypto_ahash_reqsize(auth) +
6757 +                                     sizeof(struct ahash_request),
6758 +                                     crypto_skcipher_reqsize(enc) +
6759 +                                     sizeof(struct skcipher_request)));
6760 +
6761 +       return 0;
6762 +
6763 +err_free_skcipher:
6764 +       crypto_free_skcipher(enc);
6765 +err_free_ahash:
6766 +       crypto_free_ahash(auth);
6767 +       return err;
6768 +}
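
As a worked example of the layout reserved above, take tls10(hmac(sha1),cbc(aes)) and assume a zero ahash alignmask: digestsize 20 and blocksize 16 give reqoff = ALIGN(20 + 0, 1) + max(20, 16) = 40. Bytes [0,20) of treq_ctx->tail then hold the computed digest, bytes [20,40) hold either the digest lifted from the packet (decryption) or up to one block of padding (encryption), and the ahash/skcipher sub-request starts at offset 40.
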
6769 +
6770 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6771 +{
6772 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6773 +
6774 +       crypto_free_ahash(ctx->auth);
6775 +       crypto_free_skcipher(ctx->enc);
6776 +       crypto_put_default_null_skcipher2();
6777 +}
6778 +
6779 +static void crypto_tls_free(struct aead_instance *inst)
6780 +{
6781 +       struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6782 +
6783 +       crypto_drop_skcipher(&ctx->enc);
6784 +       crypto_drop_ahash(&ctx->auth);
6785 +       kfree(inst);
6786 +}
6787 +
6788 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6789 +{
6790 +       struct crypto_attr_type *algt;
6791 +       struct aead_instance *inst;
6792 +       struct hash_alg_common *auth;
6793 +       struct crypto_alg *auth_base;
6794 +       struct skcipher_alg *enc;
6795 +       struct tls_instance_ctx *ctx;
6796 +       const char *enc_name;
6797 +       int err;
6798 +
6799 +       algt = crypto_get_attr_type(tb);
6800 +       if (IS_ERR(algt))
6801 +               return PTR_ERR(algt);
6802 +
6803 +       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6804 +               return -EINVAL;
6805 +
6806 +       auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6807 +                             CRYPTO_ALG_TYPE_AHASH_MASK |
6808 +                             crypto_requires_sync(algt->type, algt->mask));
6809 +       if (IS_ERR(auth))
6810 +               return PTR_ERR(auth);
6811 +
6812 +       auth_base = &auth->base;
6813 +
6814 +       enc_name = crypto_attr_alg_name(tb[2]);
6815 +       err = PTR_ERR(enc_name);
6816 +       if (IS_ERR(enc_name))
6817 +               goto out_put_auth;
6818 +
6819 +       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6820 +       err = -ENOMEM;
6821 +       if (!inst)
6822 +               goto out_put_auth;
6823 +
6824 +       ctx = aead_instance_ctx(inst);
6825 +
6826 +       err = crypto_init_ahash_spawn(&ctx->auth, auth,
6827 +                                     aead_crypto_instance(inst));
6828 +       if (err)
6829 +               goto err_free_inst;
6830 +
6831 +       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6832 +       err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6833 +                                  crypto_requires_sync(algt->type,
6834 +                                                       algt->mask));
6835 +       if (err)
6836 +               goto err_drop_auth;
6837 +
6838 +       enc = crypto_spawn_skcipher_alg(&ctx->enc);
6839 +
6840 +       err = -ENAMETOOLONG;
6841 +       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6842 +                    "tls10(%s,%s)", auth_base->cra_name,
6843 +                    enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6844 +               goto err_drop_enc;
6845 +
6846 +       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6847 +                    "tls10(%s,%s)", auth_base->cra_driver_name,
6848 +                    enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6849 +               goto err_drop_enc;
6850 +
6851 +       inst->alg.base.cra_flags = (auth_base->cra_flags |
6852 +                                       enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6853 +       inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6854 +                                       auth_base->cra_priority;
6855 +       inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6856 +       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6857 +                                       enc->base.cra_alignmask;
6858 +       inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6859 +
6860 +       inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6861 +       inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6862 +       inst->alg.maxauthsize = auth->digestsize;
6863 +
6864 +       inst->alg.init = crypto_tls_init_tfm;
6865 +       inst->alg.exit = crypto_tls_exit_tfm;
6866 +
6867 +       inst->alg.setkey = crypto_tls_setkey;
6868 +       inst->alg.encrypt = crypto_tls_encrypt;
6869 +       inst->alg.decrypt = crypto_tls_decrypt;
6870 +
6871 +       inst->free = crypto_tls_free;
6872 +
6873 +       err = aead_register_instance(tmpl, inst);
6874 +       if (err)
6875 +               goto err_drop_enc;
6876 +
6877 +out:
6878 +       crypto_mod_put(auth_base);
6879 +       return err;
6880 +
6881 +err_drop_enc:
6882 +       crypto_drop_skcipher(&ctx->enc);
6883 +err_drop_auth:
6884 +       crypto_drop_ahash(&ctx->auth);
6885 +err_free_inst:
6886 +       kfree(inst);
6887 +out_put_auth:
6888 +       goto out;
6889 +}
6890 +
6891 +static struct crypto_template crypto_tls_tmpl = {
6892 +       .name = "tls10",
6893 +       .create = crypto_tls_create,
6894 +       .module = THIS_MODULE,
6895 +};
6896 +
6897 +static int __init crypto_tls_module_init(void)
6898 +{
6899 +       return crypto_register_template(&crypto_tls_tmpl);
6900 +}
6901 +
6902 +static void __exit crypto_tls_module_exit(void)
6903 +{
6904 +       crypto_unregister_template(&crypto_tls_tmpl);
6905 +}
6906 +
6907 +module_init(crypto_tls_module_init);
6908 +module_exit(crypto_tls_module_exit);
6909 +
6910 +MODULE_LICENSE("GPL");
6911 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
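
For context, an instance of this template is named from its two parameters, e.g. tls10(hmac(sha1),cbc(aes)). A minimal sketch of allocating and configuring such a transform from kernel code (illustrative only, not part of the patch):

#include <crypto/aead.h>
#include <linux/err.h>

/* Sketch: obtain a tls10 transform pairing SHA-1 HMAC with AES-CBC. */
static struct crypto_aead *tls10_alloc_example(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* The ICV length must not exceed maxauthsize (the digest size, 20) */
	if (crypto_aead_setauthsize(tfm, 20)) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
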
6912 diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
6913 index 64bf3024..3831a6f7 100644
6914 --- a/drivers/crypto/caam/Kconfig
6915 +++ b/drivers/crypto/caam/Kconfig
6916 @@ -1,6 +1,11 @@
6917 +config CRYPTO_DEV_FSL_CAAM_COMMON
6918 +       tristate
6919 +
6920  config CRYPTO_DEV_FSL_CAAM
6921 -       tristate "Freescale CAAM-Multicore driver backend"
6922 +       tristate "Freescale CAAM-Multicore platform driver backend"
6923         depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6924 +       select CRYPTO_DEV_FSL_CAAM_COMMON
6925 +       select SOC_BUS
6926         help
6927           Enables the driver module for Freescale's Cryptographic Accelerator
6928           and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6929 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6930           To compile this driver as a module, choose M here: the module
6931           will be called caam.
6932  
6933 +if CRYPTO_DEV_FSL_CAAM
6934 +
6935 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6936 +       bool "Enable debug output in CAAM driver"
6937 +       help
6938 +         Selecting this will enable printing of various debug
6939 +         information in the CAAM driver.
6940 +
6941  config CRYPTO_DEV_FSL_CAAM_JR
6942         tristate "Freescale CAAM Job Ring driver backend"
6943 -       depends on CRYPTO_DEV_FSL_CAAM
6944         default y
6945         help
6946           Enables the driver module for Job Rings which are part of
6947 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6948           To compile this driver as a module, choose M here: the module
6949           will be called caam_jr.
6950  
6951 +if CRYPTO_DEV_FSL_CAAM_JR
6952 +
6953  config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6954         int "Job Ring size"
6955 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6956         range 2 9
6957         default "9"
6958         help
6959 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6960  
6961  config CRYPTO_DEV_FSL_CAAM_INTC
6962         bool "Job Ring interrupt coalescing"
6963 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6964         help
6965           Enable the Job Ring's interrupt coalescing feature.
6966  
6967 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
6968  
6969  config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6970         tristate "Register algorithm implementations with the Crypto API"
6971 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6972         default y
6973         select CRYPTO_AEAD
6974         select CRYPTO_AUTHENC
6975 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6976           To compile this as a module, choose M here: the module
6977           will be called caamalg.
6978  
6979 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6980 +       tristate "Queue Interface as Crypto API backend"
6981 +       depends on FSL_SDK_DPA && NET
6982 +       default y
6983 +       select CRYPTO_AUTHENC
6984 +       select CRYPTO_BLKCIPHER
6985 +       help
6986 +         Selecting this will use the CAAM Queue Interface (QI) for sending
6987 +         and receiving crypto jobs to/from CAAM. This gives better performance
6988 +         than the job ring interface when the number of cores is greater than
6989 +         the number of job rings assigned to the kernel. The number of portals
6990 +         assigned to the kernel should also be greater than the number of
6991 +         job rings.
6992 +
6993 +         To compile this as a module, choose M here: the module
6994 +         will be called caamalg_qi.
6995 +
6996  config CRYPTO_DEV_FSL_CAAM_AHASH_API
6997         tristate "Register hash algorithm implementations with Crypto API"
6998 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6999         default y
7000         select CRYPTO_HASH
7001         help
7002 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7003  
7004  config CRYPTO_DEV_FSL_CAAM_PKC_API
7005          tristate "Register public key cryptography implementations with Crypto API"
7006 -        depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7007          default y
7008          select CRYPTO_RSA
7009          help
7010 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7011  
7012  config CRYPTO_DEV_FSL_CAAM_RNG_API
7013         tristate "Register caam device for hwrng API"
7014 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7015         default y
7016         select CRYPTO_RNG
7017         select HW_RANDOM
7018 @@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7019           To compile this as a module, choose M here: the module
7020           will be called caamrng.
7021  
7022 -config CRYPTO_DEV_FSL_CAAM_IMX
7023 -       def_bool SOC_IMX6 || SOC_IMX7D
7024 -       depends on CRYPTO_DEV_FSL_CAAM
7025 +endif # CRYPTO_DEV_FSL_CAAM_JR
7026  
7027 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7028 -       bool "Enable debug output in CAAM driver"
7029 -       depends on CRYPTO_DEV_FSL_CAAM
7030 -       help
7031 -         Selecting this will enable printing of various debug
7032 -         information in the CAAM driver.
7033 +endif # CRYPTO_DEV_FSL_CAAM
7034 +
7035 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7036 +       tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7037 +       depends on FSL_MC_DPIO
7038 +       select CRYPTO_DEV_FSL_CAAM_COMMON
7039 +       select CRYPTO_BLKCIPHER
7040 +       select CRYPTO_AUTHENC
7041 +       select CRYPTO_AEAD
7042 +       ---help---
7043 +         CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7044 +         It handles DPSECI DPAA2 objects that sit on the Management Complex
7045 +         (MC) fsl-mc bus.
7046 +
7047 +         To compile this as a module, choose M here: the module
7048 +         will be called dpaa2_caam.
7049 +
7050 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7051 +       def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7052 +                     CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7053 +                     CRYPTO_DEV_FSL_DPAA2_CAAM)
7054 diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
7055 index 08bf5515..01f73a25 100644
7056 --- a/drivers/crypto/caam/Makefile
7057 +++ b/drivers/crypto/caam/Makefile
7058 @@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
7059         ccflags-y := -DDEBUG
7060  endif
7061  
7062 +ccflags-y += -DVERSION=\"\"
7063 +
7064 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7065  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7066  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7067  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7068 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7069 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7070  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7071  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7072  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7073  
7074  caam-objs := ctrl.o
7075 -caam_jr-objs := jr.o key_gen.o error.o
7076 +caam_jr-objs := jr.o key_gen.o
7077  caam_pkc-y := caampkc.o pkc_desc.o
7078 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7079 +       ccflags-y += -DCONFIG_CAAM_QI
7080 +       caam-objs += qi.o
7081 +endif
7082 +
7083 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7084 +
7085 +dpaa2_caam-y    := caamalg_qi2.o dpseci.o
7086 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
7087 index 0d743c63..6480a01f 100644
7088 --- a/drivers/crypto/caam/caamalg.c
7089 +++ b/drivers/crypto/caam/caamalg.c
7090 @@ -2,6 +2,7 @@
7091   * caam - Freescale FSL CAAM support for crypto API
7092   *
7093   * Copyright 2008-2011 Freescale Semiconductor, Inc.
7094 + * Copyright 2016 NXP
7095   *
7096   * Based on talitos crypto API driver.
7097   *
7098 @@ -53,6 +54,7 @@
7099  #include "error.h"
7100  #include "sg_sw_sec4.h"
7101  #include "key_gen.h"
7102 +#include "caamalg_desc.h"
7103  
7104  /*
7105   * crypto alg
7106 @@ -62,8 +64,6 @@
7107  #define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
7108                                          CTR_RFC3686_NONCE_SIZE + \
7109                                          SHA512_DIGEST_SIZE * 2)
7110 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7111 -#define CAAM_MAX_IV_LENGTH             16
7112  
7113  #define AEAD_DESC_JOB_IO_LEN           (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7114  #define GCM_DESC_JOB_IO_LEN            (AEAD_DESC_JOB_IO_LEN + \
7115 @@ -71,37 +71,6 @@
7116  #define AUTHENC_DESC_JOB_IO_LEN                (AEAD_DESC_JOB_IO_LEN + \
7117                                          CAAM_CMD_SZ * 5)
7118  
7119 -/* length of descriptors text */
7120 -#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
7121 -#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7122 -#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7123 -#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7124 -
7125 -/* Note: Nonce is counted in enckeylen */
7126 -#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
7127 -
7128 -#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
7129 -#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7130 -#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7131 -
7132 -#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
7133 -#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7134 -#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7135 -
7136 -#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
7137 -#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7138 -#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7139 -
7140 -#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
7141 -#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7142 -#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7143 -
7144 -#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
7145 -#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
7146 -                                        20 * CAAM_CMD_SZ)
7147 -#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
7148 -                                        15 * CAAM_CMD_SZ)
7149 -
7150  #define DESC_MAX_USED_BYTES            (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7151  #define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7152  
7153 @@ -112,47 +81,11 @@
7154  #define debug(format, arg...)
7155  #endif
7156  
7157 -#ifdef DEBUG
7158 -#include <linux/highmem.h>
7159 -
7160 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7161 -                       int prefix_type, int rowsize, int groupsize,
7162 -                       struct scatterlist *sg, size_t tlen, bool ascii,
7163 -                       bool may_sleep)
7164 -{
7165 -       struct scatterlist *it;
7166 -       void *it_page;
7167 -       size_t len;
7168 -       void *buf;
7169 -
7170 -       for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7171 -               /*
7172 -                * make sure the scatterlist's page
7173 -                * has a valid virtual memory mapping
7174 -                */
7175 -               it_page = kmap_atomic(sg_page(it));
7176 -               if (unlikely(!it_page)) {
7177 -                       printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7178 -                       return;
7179 -               }
7180 -
7181 -               buf = it_page + it->offset;
7182 -               len = min_t(size_t, tlen, it->length);
7183 -               print_hex_dump(level, prefix_str, prefix_type, rowsize,
7184 -                              groupsize, buf, len, ascii);
7185 -               tlen -= len;
7186 -
7187 -               kunmap_atomic(it_page);
7188 -       }
7189 -}
7190 -#endif
7191 -
7192  static struct list_head alg_list;
7193  
7194  struct caam_alg_entry {
7195         int class1_alg_type;
7196         int class2_alg_type;
7197 -       int alg_op;
7198         bool rfc3686;
7199         bool geniv;
7200  };
7201 @@ -163,302 +96,67 @@ struct caam_aead_alg {
7202         bool registered;
7203  };
7204  
7205 -/* Set DK bit in class 1 operation if shared */
7206 -static inline void append_dec_op1(u32 *desc, u32 type)
7207 -{
7208 -       u32 *jump_cmd, *uncond_jump_cmd;
7209 -
7210 -       /* DK bit is valid only for AES */
7211 -       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7212 -               append_operation(desc, type | OP_ALG_AS_INITFINAL |
7213 -                                OP_ALG_DECRYPT);
7214 -               return;
7215 -       }
7216 -
7217 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7218 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7219 -                        OP_ALG_DECRYPT);
7220 -       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7221 -       set_jump_tgt_here(desc, jump_cmd);
7222 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7223 -                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7224 -       set_jump_tgt_here(desc, uncond_jump_cmd);
7225 -}
7226 -
7227 -/*
7228 - * For aead functions, read payload and write payload,
7229 - * both of which are specified in req->src and req->dst
7230 - */
7231 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7232 -{
7233 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7234 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7235 -                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7236 -}
7237 -
7238 -/*
7239 - * For ablkcipher encrypt and decrypt, read from req->src and
7240 - * write to req->dst
7241 - */
7242 -static inline void ablkcipher_append_src_dst(u32 *desc)
7243 -{
7244 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7245 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7246 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7247 -                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7248 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7249 -}
7250 -
7251  /*
7252   * per-session context
7253   */
7254  struct caam_ctx {
7255 -       struct device *jrdev;
7256         u32 sh_desc_enc[DESC_MAX_USED_LEN];
7257         u32 sh_desc_dec[DESC_MAX_USED_LEN];
7258         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7259 +       u8 key[CAAM_MAX_KEY_SIZE];
7260         dma_addr_t sh_desc_enc_dma;
7261         dma_addr_t sh_desc_dec_dma;
7262         dma_addr_t sh_desc_givenc_dma;
7263 -       u32 class1_alg_type;
7264 -       u32 class2_alg_type;
7265 -       u32 alg_op;
7266 -       u8 key[CAAM_MAX_KEY_SIZE];
7267         dma_addr_t key_dma;
7268 -       unsigned int enckeylen;
7269 -       unsigned int split_key_len;
7270 -       unsigned int split_key_pad_len;
7271 +       struct device *jrdev;
7272 +       struct alginfo adata;
7273 +       struct alginfo cdata;
7274         unsigned int authsize;
7275  };
7276  
7277 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7278 -                           int keys_fit_inline, bool is_rfc3686)
7279 -{
7280 -       u32 *nonce;
7281 -       unsigned int enckeylen = ctx->enckeylen;
7282 -
7283 -       /*
7284 -        * RFC3686 specific:
7285 -        *      | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7286 -        *      | enckeylen = encryption key size + nonce size
7287 -        */
7288 -       if (is_rfc3686)
7289 -               enckeylen -= CTR_RFC3686_NONCE_SIZE;
7290 -
7291 -       if (keys_fit_inline) {
7292 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7293 -                                 ctx->split_key_len, CLASS_2 |
7294 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7295 -               append_key_as_imm(desc, (void *)ctx->key +
7296 -                                 ctx->split_key_pad_len, enckeylen,
7297 -                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7298 -       } else {
7299 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7300 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7301 -               append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7302 -                          enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7303 -       }
7304 -
7305 -       /* Load Counter into CONTEXT1 reg */
7306 -       if (is_rfc3686) {
7307 -               nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7308 -                              enckeylen);
7309 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7310 -                                  LDST_CLASS_IND_CCB |
7311 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7312 -               append_move(desc,
7313 -                           MOVE_SRC_OUTFIFO |
7314 -                           MOVE_DEST_CLASS1CTX |
7315 -                           (16 << MOVE_OFFSET_SHIFT) |
7316 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7317 -       }
7318 -}
7319 -
7320 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7321 -                                 int keys_fit_inline, bool is_rfc3686)
7322 -{
7323 -       u32 *key_jump_cmd;
7324 -
7325 -       /* Note: Context registers are saved. */
7326 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7327 -
7328 -       /* Skip if already shared */
7329 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7330 -                                  JUMP_COND_SHRD);
7331 -
7332 -       append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7333 -
7334 -       set_jump_tgt_here(desc, key_jump_cmd);
7335 -}
7336 -
7337  static int aead_null_set_sh_desc(struct crypto_aead *aead)
7338  {
7339         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7340         struct device *jrdev = ctx->jrdev;
7341 -       bool keys_fit_inline = false;
7342 -       u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7343         u32 *desc;
7344 +       int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7345 +                       ctx->adata.keylen_pad;
7346  
7347         /*
7348          * Job Descriptor and Shared Descriptors
7349          * must all fit into the 64-word Descriptor h/w Buffer
7350          */
7351 -       if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7352 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7353 -               keys_fit_inline = true;
7354 +       if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7355 +               ctx->adata.key_inline = true;
7356 +               ctx->adata.key_virt = ctx->key;
7357 +       } else {
7358 +               ctx->adata.key_inline = false;
7359 +               ctx->adata.key_dma = ctx->key_dma;
7360 +       }
7361  
7362         /* aead_encrypt shared descriptor */
7363         desc = ctx->sh_desc_enc;
7364 -
7365 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7366 -
7367 -       /* Skip if already shared */
7368 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7369 -                                  JUMP_COND_SHRD);
7370 -       if (keys_fit_inline)
7371 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7372 -                                 ctx->split_key_len, CLASS_2 |
7373 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7374 -       else
7375 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7376 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7377 -       set_jump_tgt_here(desc, key_jump_cmd);
7378 -
7379 -       /* assoclen + cryptlen = seqinlen */
7380 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7381 -
7382 -       /* Prepare to read and write cryptlen + assoclen bytes */
7383 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7384 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7385 -
7386 -       /*
7387 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7388 -        * thus need to do some magic, i.e. self-patch the descriptor
7389 -        * buffer.
7390 -        */
7391 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7392 -                                   MOVE_DEST_MATH3 |
7393 -                                   (0x6 << MOVE_LEN_SHIFT));
7394 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7395 -                                    MOVE_DEST_DESCBUF |
7396 -                                    MOVE_WAITCOMP |
7397 -                                    (0x8 << MOVE_LEN_SHIFT));
7398 -
7399 -       /* Class 2 operation */
7400 -       append_operation(desc, ctx->class2_alg_type |
7401 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7402 -
7403 -       /* Read and write cryptlen bytes */
7404 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7405 -
7406 -       set_move_tgt_here(desc, read_move_cmd);
7407 -       set_move_tgt_here(desc, write_move_cmd);
7408 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7409 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7410 -                   MOVE_AUX_LS);
7411 -
7412 -       /* Write ICV */
7413 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7414 -                        LDST_SRCDST_BYTE_CONTEXT);
7415 -
7416 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7417 -                                             desc_bytes(desc),
7418 -                                             DMA_TO_DEVICE);
7419 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7420 -               dev_err(jrdev, "unable to map shared descriptor\n");
7421 -               return -ENOMEM;
7422 -       }
7423 -#ifdef DEBUG
7424 -       print_hex_dump(KERN_ERR,
7425 -                      "aead null enc shdesc@"__stringify(__LINE__)": ",
7426 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7427 -                      desc_bytes(desc), 1);
7428 -#endif
7429 +       cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
7430 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7431 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7432  
7433         /*
7434          * Job Descriptor and Shared Descriptors
7435          * must all fit into the 64-word Descriptor h/w Buffer
7436          */
7437 -       keys_fit_inline = false;
7438 -       if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7439 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7440 -               keys_fit_inline = true;
7441 -
7442 -       desc = ctx->sh_desc_dec;
7443 +       if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7444 +               ctx->adata.key_inline = true;
7445 +               ctx->adata.key_virt = ctx->key;
7446 +       } else {
7447 +               ctx->adata.key_inline = false;
7448 +               ctx->adata.key_dma = ctx->key_dma;
7449 +       }
7450  
7451         /* aead_decrypt shared descriptor */
7452 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7453 -
7454 -       /* Skip if already shared */
7455 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7456 -                                  JUMP_COND_SHRD);
7457 -       if (keys_fit_inline)
7458 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7459 -                                 ctx->split_key_len, CLASS_2 |
7460 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7461 -       else
7462 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7463 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7464 -       set_jump_tgt_here(desc, key_jump_cmd);
7465 -
7466 -       /* Class 2 operation */
7467 -       append_operation(desc, ctx->class2_alg_type |
7468 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7469 -
7470 -       /* assoclen + cryptlen = seqoutlen */
7471 -       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7472 -
7473 -       /* Prepare to read and write cryptlen + assoclen bytes */
7474 -       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7475 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7476 -
7477 -       /*
7478 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7479 -        * thus need to do some magic, i.e. self-patch the descriptor
7480 -        * buffer.
7481 -        */
7482 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7483 -                                   MOVE_DEST_MATH2 |
7484 -                                   (0x6 << MOVE_LEN_SHIFT));
7485 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7486 -                                    MOVE_DEST_DESCBUF |
7487 -                                    MOVE_WAITCOMP |
7488 -                                    (0x8 << MOVE_LEN_SHIFT));
7489 -
7490 -       /* Read and write cryptlen bytes */
7491 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7492 -
7493 -       /*
7494 -        * Insert a NOP here, since we need at least 4 instructions between
7495 -        * code patching the descriptor buffer and the location being patched.
7496 -        */
7497 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7498 -       set_jump_tgt_here(desc, jump_cmd);
7499 -
7500 -       set_move_tgt_here(desc, read_move_cmd);
7501 -       set_move_tgt_here(desc, write_move_cmd);
7502 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7503 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7504 -                   MOVE_AUX_LS);
7505 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7506 -
7507 -       /* Load ICV */
7508 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7509 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7510 -
7511 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7512 -                                             desc_bytes(desc),
7513 -                                             DMA_TO_DEVICE);
7514 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7515 -               dev_err(jrdev, "unable to map shared descriptor\n");
7516 -               return -ENOMEM;
7517 -       }
7518 -#ifdef DEBUG
7519 -       print_hex_dump(KERN_ERR,
7520 -                      "aead null dec shdesc@"__stringify(__LINE__)": ",
7521 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7522 -                      desc_bytes(desc), 1);
7523 -#endif
7524 +       desc = ctx->sh_desc_dec;
7525 +       cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
7526 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7527 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7528  
7529         return 0;
7530  }
7531 @@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7532         unsigned int ivsize = crypto_aead_ivsize(aead);
7533         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7534         struct device *jrdev = ctx->jrdev;
7535 -       bool keys_fit_inline;
7536 -       u32 geniv, moveiv;
7537         u32 ctx1_iv_off = 0;
7538 -       u32 *desc;
7539 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7540 +       u32 *desc, *nonce = NULL;
7541 +       u32 inl_mask;
7542 +       unsigned int data_len[2];
7543 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7544                                OP_ALG_AAI_CTR_MOD128);
7545         const bool is_rfc3686 = alg->caam.rfc3686;
7546  
7547 @@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7548                 return 0;
7549  
7550         /* NULL encryption / decryption */
7551 -       if (!ctx->enckeylen)
7552 +       if (!ctx->cdata.keylen)
7553                 return aead_null_set_sh_desc(aead);
7554  
7555         /*
7556 @@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7557          * RFC3686 specific:
7558          *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7559          */
7560 -       if (is_rfc3686)
7561 +       if (is_rfc3686) {
7562                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7563 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7564 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7565 +       }
7566 +
7567 +       data_len[0] = ctx->adata.keylen_pad;
7568 +       data_len[1] = ctx->cdata.keylen;
7569  
7570         if (alg->caam.geniv)
7571                 goto skip_enc;
7572 @@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7573          * Job Descriptor and Shared Descriptors
7574          * must all fit into the 64-word Descriptor h/w Buffer
7575          */
7576 -       keys_fit_inline = false;
7577 -       if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7578 -           ctx->split_key_pad_len + ctx->enckeylen +
7579 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7580 -           CAAM_DESC_BYTES_MAX)
7581 -               keys_fit_inline = true;
7582 -
7583 -       /* aead_encrypt shared descriptor */
7584 -       desc = ctx->sh_desc_enc;
7585 -
7586 -       /* Note: Context registers are saved. */
7587 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7588 -
7589 -       /* Class 2 operation */
7590 -       append_operation(desc, ctx->class2_alg_type |
7591 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7592 +       if (desc_inline_query(DESC_AEAD_ENC_LEN +
7593 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7594 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7595 +                             ARRAY_SIZE(data_len)) < 0)
7596 +               return -EINVAL;
7597  
7598 -       /* Read and write assoclen bytes */
7599 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7600 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7601 +       if (inl_mask & 1)
7602 +               ctx->adata.key_virt = ctx->key;
7603 +       else
7604 +               ctx->adata.key_dma = ctx->key_dma;
7605  
7606 -       /* Skip assoc data */
7607 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7608 +       if (inl_mask & 2)
7609 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7610 +       else
7611 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7612  
7613 -       /* read assoc before reading payload */
7614 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7615 -                                     FIFOLDST_VLF);
7616 +       ctx->adata.key_inline = !!(inl_mask & 1);
7617 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7618  
7619 -       /* Load Counter into CONTEXT1 reg */
7620 -       if (is_rfc3686)
7621 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7622 -                                    LDST_SRCDST_BYTE_CONTEXT |
7623 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7624 -                                     LDST_OFFSET_SHIFT));
7625 -
7626 -       /* Class 1 operation */
7627 -       append_operation(desc, ctx->class1_alg_type |
7628 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7629 -
7630 -       /* Read and write cryptlen bytes */
7631 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7632 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7633 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7634 -
7635 -       /* Write ICV */
7636 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7637 -                        LDST_SRCDST_BYTE_CONTEXT);
7638 -
7639 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7640 -                                             desc_bytes(desc),
7641 -                                             DMA_TO_DEVICE);
7642 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7643 -               dev_err(jrdev, "unable to map shared descriptor\n");
7644 -               return -ENOMEM;
7645 -       }
7646 -#ifdef DEBUG
7647 -       print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7648 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7649 -                      desc_bytes(desc), 1);
7650 -#endif
7651 +       /* aead_encrypt shared descriptor */
7652 +       desc = ctx->sh_desc_enc;
7653 +       cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7654 +                              ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7655 +                              false);
7656 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7657 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7658  
7659  skip_enc:
7660         /*
7661          * Job Descriptor and Shared Descriptors
7662          * must all fit into the 64-word Descriptor h/w Buffer
7663          */
7664 -       keys_fit_inline = false;
7665 -       if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7666 -           ctx->split_key_pad_len + ctx->enckeylen +
7667 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7668 -           CAAM_DESC_BYTES_MAX)
7669 -               keys_fit_inline = true;
7670 -
7671 -       /* aead_decrypt shared descriptor */
7672 -       desc = ctx->sh_desc_dec;
7673 -
7674 -       /* Note: Context registers are saved. */
7675 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7676 -
7677 -       /* Class 2 operation */
7678 -       append_operation(desc, ctx->class2_alg_type |
7679 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7680 +       if (desc_inline_query(DESC_AEAD_DEC_LEN +
7681 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7682 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7683 +                             ARRAY_SIZE(data_len)) < 0)
7684 +               return -EINVAL;
7685  
7686 -       /* Read and write assoclen bytes */
7687 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7688 -       if (alg->caam.geniv)
7689 -               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7690 +       if (inl_mask & 1)
7691 +               ctx->adata.key_virt = ctx->key;
7692         else
7693 -               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7694 -
7695 -       /* Skip assoc data */
7696 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7697 +               ctx->adata.key_dma = ctx->key_dma;
7698  
7699 -       /* read assoc before reading payload */
7700 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7701 -                            KEY_VLF);
7702 -
7703 -       if (alg->caam.geniv) {
7704 -               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7705 -                               LDST_SRCDST_BYTE_CONTEXT |
7706 -                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
7707 -               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7708 -                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7709 -       }
7710 -
7711 -       /* Load Counter into CONTEXT1 reg */
7712 -       if (is_rfc3686)
7713 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7714 -                                    LDST_SRCDST_BYTE_CONTEXT |
7715 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7716 -                                     LDST_OFFSET_SHIFT));
7717 -
7718 -       /* Choose operation */
7719 -       if (ctr_mode)
7720 -               append_operation(desc, ctx->class1_alg_type |
7721 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7722 +       if (inl_mask & 2)
7723 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7724         else
7725 -               append_dec_op1(desc, ctx->class1_alg_type);
7726 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7727  
7728 -       /* Read and write cryptlen bytes */
7729 -       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7730 -       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7731 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7732 +       ctx->adata.key_inline = !!(inl_mask & 1);
7733 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7734  
7735 -       /* Load ICV */
7736 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7737 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7738 -
7739 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7740 -                                             desc_bytes(desc),
7741 -                                             DMA_TO_DEVICE);
7742 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7743 -               dev_err(jrdev, "unable to map shared descriptor\n");
7744 -               return -ENOMEM;
7745 -       }
7746 -#ifdef DEBUG
7747 -       print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7748 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7749 -                      desc_bytes(desc), 1);
7750 -#endif
7751 +       /* aead_decrypt shared descriptor */
7752 +       desc = ctx->sh_desc_dec;
7753 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7754 +                              ctx->authsize, alg->caam.geniv, is_rfc3686,
7755 +                              nonce, ctx1_iv_off, false);
7756 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7757 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7758  
7759         if (!alg->caam.geniv)
7760                 goto skip_givenc;
7761 @@ -655,107 +277,32 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
7762          * Job Descriptor and Shared Descriptors
7763          * must all fit into the 64-word Descriptor h/w Buffer
7764          */
7765 -       keys_fit_inline = false;
7766 -       if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7767 -           ctx->split_key_pad_len + ctx->enckeylen +
7768 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7769 -           CAAM_DESC_BYTES_MAX)
7770 -               keys_fit_inline = true;
7771 +       if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7772 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7773 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7774 +                             ARRAY_SIZE(data_len)) < 0)
7775 +               return -EINVAL;
7776  
7777 -       /* aead_givencrypt shared descriptor */
7778 -       desc = ctx->sh_desc_enc;
7779 +       if (inl_mask & 1)
7780 +               ctx->adata.key_virt = ctx->key;
7781 +       else
7782 +               ctx->adata.key_dma = ctx->key_dma;
7783  
7784 -       /* Note: Context registers are saved. */
7785 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7786 +       if (inl_mask & 2)
7787 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7788 +       else
7789 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7790  
7791 -       if (is_rfc3686)
7792 -               goto copy_iv;
7793 -
7794 -       /* Generate IV */
7795 -       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7796 -               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7797 -               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7798 -       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7799 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7800 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7801 -       append_move(desc, MOVE_WAITCOMP |
7802 -                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7803 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7804 -                   (ivsize << MOVE_LEN_SHIFT));
7805 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7806 -
7807 -copy_iv:
7808 -       /* Copy IV to class 1 context */
7809 -       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7810 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7811 -                   (ivsize << MOVE_LEN_SHIFT));
7812 -
7813 -       /* Return to encryption */
7814 -       append_operation(desc, ctx->class2_alg_type |
7815 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7816 -
7817 -       /* Read and write assoclen bytes */
7818 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7819 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7820 -
7821 -       /* ivsize + cryptlen = seqoutlen - authsize */
7822 -       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7823 -
7824 -       /* Skip assoc data */
7825 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7826 -
7827 -       /* read assoc before reading payload */
7828 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7829 -                            KEY_VLF);
7830 -
7831 -       /* Copy iv from outfifo to class 2 fifo */
7832 -       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7833 -                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7834 -       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7835 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7836 -       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7837 -                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7838 -
7839 -       /* Load Counter into CONTEXT1 reg */
7840 -       if (is_rfc3686)
7841 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7842 -                                    LDST_SRCDST_BYTE_CONTEXT |
7843 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7844 -                                     LDST_OFFSET_SHIFT));
7845 -
7846 -       /* Class 1 operation */
7847 -       append_operation(desc, ctx->class1_alg_type |
7848 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7849 -
7850 -       /* Will write ivsize + cryptlen */
7851 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7852 -
7853 -       /* Not need to reload iv */
7854 -       append_seq_fifo_load(desc, ivsize,
7855 -                            FIFOLD_CLASS_SKIP);
7856 -
7857 -       /* Will read cryptlen */
7858 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7859 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7860 -                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7861 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7862 -
7863 -       /* Write ICV */
7864 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7865 -                        LDST_SRCDST_BYTE_CONTEXT);
7866 -
7867 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7868 -                                             desc_bytes(desc),
7869 -                                             DMA_TO_DEVICE);
7870 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7871 -               dev_err(jrdev, "unable to map shared descriptor\n");
7872 -               return -ENOMEM;
7873 -       }
7874 -#ifdef DEBUG
7875 -       print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7876 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7877 -                      desc_bytes(desc), 1);
7878 -#endif
7879 +       ctx->adata.key_inline = !!(inl_mask & 1);
7880 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7881 +
7882 +       /* aead_givencrypt shared descriptor */
7883 +       desc = ctx->sh_desc_enc;
7884 +       cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7885 +                                 ctx->authsize, is_rfc3686, nonce,
7886 +                                 ctx1_iv_off, false);
7887 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7888 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7889  
7890  skip_givenc:
7891         return 0;
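
The three inline-query blocks in aead_set_sh_desc() above differ only in the descriptor length constant, so the pattern is worth spelling out once. desc_inline_query() (added to desc_constr.h by this patch) sets bit i of inl_mask when data_len[i] still fits in the 64-word descriptor buffer alongside the given shared- and job-descriptor overhead; bit 0 covers the split authentication key, bit 1 the encryption key. A hypothetical factoring of the repeated logic, built only from names in these hunks:

	static int aead_query_keys(struct caam_ctx *ctx, unsigned int sd_len,
				   bool is_rfc3686)
	{
		unsigned int data_len[2] = { ctx->adata.keylen_pad,
					     ctx->cdata.keylen };
		u32 inl_mask;

		if (desc_inline_query(sd_len +
				      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
				      AUTHENC_DESC_JOB_IO_LEN, data_len,
				      &inl_mask, ARRAY_SIZE(data_len)) < 0)
			return -EINVAL;

		/* inline what fits; fall back to DMA references otherwise */
		if (inl_mask & 1)
			ctx->adata.key_virt = ctx->key;
		else
			ctx->adata.key_dma = ctx->key_dma;

		if (inl_mask & 2)
			ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
		else
			ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

		ctx->adata.key_inline = !!(inl_mask & 1);
		ctx->cdata.key_inline = !!(inl_mask & 2);
		return 0;
	}
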
7892 @@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
7893  {
7894         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7895         struct device *jrdev = ctx->jrdev;
7896 -       bool keys_fit_inline = false;
7897 -       u32 *key_jump_cmd, *zero_payload_jump_cmd,
7898 -           *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7899 +       unsigned int ivsize = crypto_aead_ivsize(aead);
7900         u32 *desc;
7901 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7902 +                       ctx->cdata.keylen;
7903  
7904 -       if (!ctx->enckeylen || !ctx->authsize)
7905 +       if (!ctx->cdata.keylen || !ctx->authsize)
7906                 return 0;
7907  
7908         /*
7909 @@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
7910          * Job Descriptor and Shared Descriptor
7911          * must fit into the 64-word Descriptor h/w Buffer
7912          */
7913 -       if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7914 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7915 -               keys_fit_inline = true;
7916 +       if (rem_bytes >= DESC_GCM_ENC_LEN) {
7917 +               ctx->cdata.key_inline = true;
7918 +               ctx->cdata.key_virt = ctx->key;
7919 +       } else {
7920 +               ctx->cdata.key_inline = false;
7921 +               ctx->cdata.key_dma = ctx->key_dma;
7922 +       }
7923  
7924         desc = ctx->sh_desc_enc;
7925 -
7926 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7927 -
7928 -       /* skip key loading if they are loaded due to sharing */
7929 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7930 -                                  JUMP_COND_SHRD | JUMP_COND_SELF);
7931 -       if (keys_fit_inline)
7932 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7933 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7934 -       else
7935 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
7936 -                          CLASS_1 | KEY_DEST_CLASS_REG);
7937 -       set_jump_tgt_here(desc, key_jump_cmd);
7938 -
7939 -       /* class 1 operation */
7940 -       append_operation(desc, ctx->class1_alg_type |
7941 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7942 -
7943 -       /* if assoclen + cryptlen is ZERO, skip to ICV write */
7944 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7945 -       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7946 -                                                JUMP_COND_MATH_Z);
7947 -
7948 -       /* if assoclen is ZERO, skip reading the assoc data */
7949 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7950 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7951 -                                                JUMP_COND_MATH_Z);
7952 -
7953 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7954 -
7955 -       /* skip assoc data */
7956 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7957 -
7958 -       /* cryptlen = seqinlen - assoclen */
7959 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7960 -
7961 -       /* if cryptlen is ZERO jump to zero-payload commands */
7962 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7963 -                                           JUMP_COND_MATH_Z);
7964 -
7965 -       /* read assoc data */
7966 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7967 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7968 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7969 -
7970 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7971 -
7972 -       /* write encrypted data */
7973 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7974 -
7975 -       /* read payload data */
7976 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7977 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7978 -
7979 -       /* jump the zero-payload commands */
7980 -       append_jump(desc, JUMP_TEST_ALL | 2);
7981 -
7982 -       /* zero-payload commands */
7983 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
7984 -
7985 -       /* read assoc data */
7986 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7987 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7988 -
7989 -       /* There is no input data */
7990 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7991 -
7992 -       /* write ICV */
7993 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7994 -                        LDST_SRCDST_BYTE_CONTEXT);
7995 -
7996 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7997 -                                             desc_bytes(desc),
7998 -                                             DMA_TO_DEVICE);
7999 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8000 -               dev_err(jrdev, "unable to map shared descriptor\n");
8001 -               return -ENOMEM;
8002 -       }
8003 -#ifdef DEBUG
8004 -       print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8005 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8006 -                      desc_bytes(desc), 1);
8007 -#endif
8008 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8009 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8010 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8011  
8012         /*
8013          * Job Descriptor and Shared Descriptors
8014          * must all fit into the 64-word Descriptor h/w Buffer
8015          */
8016 -       keys_fit_inline = false;
8017 -       if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8018 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8019 -               keys_fit_inline = true;
8020 +       if (rem_bytes >= DESC_GCM_DEC_LEN) {
8021 +               ctx->cdata.key_inline = true;
8022 +               ctx->cdata.key_virt = ctx->key;
8023 +       } else {
8024 +               ctx->cdata.key_inline = false;
8025 +               ctx->cdata.key_dma = ctx->key_dma;
8026 +       }
8027  
8028         desc = ctx->sh_desc_dec;
8029 -
8030 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8031 -
8032 -       /* skip key loading if they are loaded due to sharing */
8033 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8034 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD |
8035 -                                  JUMP_COND_SELF);
8036 -       if (keys_fit_inline)
8037 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8038 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8039 -       else
8040 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8041 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8042 -       set_jump_tgt_here(desc, key_jump_cmd);
8043 -
8044 -       /* class 1 operation */
8045 -       append_operation(desc, ctx->class1_alg_type |
8046 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8047 -
8048 -       /* if assoclen is ZERO, skip reading the assoc data */
8049 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8050 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8051 -                                                JUMP_COND_MATH_Z);
8052 -
8053 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8054 -
8055 -       /* skip assoc data */
8056 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8057 -
8058 -       /* read assoc data */
8059 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8060 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8061 -
8062 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8063 -
8064 -       /* cryptlen = seqoutlen - assoclen */
8065 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8066 -
8067 -       /* jump to zero-payload command if cryptlen is zero */
8068 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8069 -                                           JUMP_COND_MATH_Z);
8070 -
8071 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8072 -
8073 -       /* store encrypted data */
8074 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8075 -
8076 -       /* read payload data */
8077 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8078 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8079 -
8080 -       /* zero-payload command */
8081 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
8082 -
8083 -       /* read ICV */
8084 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8085 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8086 -
8087 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8088 -                                             desc_bytes(desc),
8089 -                                             DMA_TO_DEVICE);
8090 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8091 -               dev_err(jrdev, "unable to map shared descriptor\n");
8092 -               return -ENOMEM;
8093 -       }
8094 -#ifdef DEBUG
8095 -       print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8096 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8097 -                      desc_bytes(desc), 1);
8098 -#endif
8099 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8100 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8101 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8102  
8103         return 0;
8104  }
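
For the single-key GCM family the query above collapses to a plain threshold: rem_bytes is what remains of CAAM_DESC_BYTES_MAX once the job-descriptor I/O overhead and the key itself are accounted for. The same test recurs below for rfc4106 and rfc4543; a hypothetical helper capturing it:

	static void gcm_query_key(struct caam_ctx *ctx, int rem_bytes,
				  unsigned int desc_len)
	{
		/* inline the key when the descriptor still fits */
		if (rem_bytes >= desc_len) {
			ctx->cdata.key_inline = true;
			ctx->cdata.key_virt = ctx->key;
		} else {
			ctx->cdata.key_inline = false;
			ctx->cdata.key_dma = ctx->key_dma;
		}
	}
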
8105 @@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
8106  {
8107         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8108         struct device *jrdev = ctx->jrdev;
8109 -       bool keys_fit_inline = false;
8110 -       u32 *key_jump_cmd;
8111 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8112         u32 *desc;
8113 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8114 +                       ctx->cdata.keylen;
8115  
8116 -       if (!ctx->enckeylen || !ctx->authsize)
8117 +       if (!ctx->cdata.keylen || !ctx->authsize)
8118                 return 0;
8119  
8120         /*
8121 @@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
8122          * Job Descriptor and Shared Descriptor
8123          * must fit into the 64-word Descriptor h/w Buffer
8124          */
8125 -       if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8126 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8127 -               keys_fit_inline = true;
8128 +       if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8129 +               ctx->cdata.key_inline = true;
8130 +               ctx->cdata.key_virt = ctx->key;
8131 +       } else {
8132 +               ctx->cdata.key_inline = false;
8133 +               ctx->cdata.key_dma = ctx->key_dma;
8134 +       }
8135  
8136         desc = ctx->sh_desc_enc;
8137 -
8138 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8139 -
8140 -       /* Skip key loading if it is loaded due to sharing */
8141 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8142 -                                  JUMP_COND_SHRD);
8143 -       if (keys_fit_inline)
8144 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8145 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8146 -       else
8147 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8148 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8149 -       set_jump_tgt_here(desc, key_jump_cmd);
8150 -
8151 -       /* Class 1 operation */
8152 -       append_operation(desc, ctx->class1_alg_type |
8153 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8154 -
8155 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8156 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8157 -
8158 -       /* Read assoc data */
8159 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8160 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8161 -
8162 -       /* Skip IV */
8163 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8164 -
8165 -       /* Will read cryptlen bytes */
8166 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8167 -
8168 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8169 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8170 -
8171 -       /* Skip assoc data */
8172 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8173 -
8174 -       /* cryptlen = seqoutlen - assoclen */
8175 -       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8176 -
8177 -       /* Write encrypted data */
8178 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8179 -
8180 -       /* Read payload data */
8181 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8182 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8183 -
8184 -       /* Write ICV */
8185 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8186 -                        LDST_SRCDST_BYTE_CONTEXT);
8187 -
8188 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8189 -                                             desc_bytes(desc),
8190 -                                             DMA_TO_DEVICE);
8191 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8192 -               dev_err(jrdev, "unable to map shared descriptor\n");
8193 -               return -ENOMEM;
8194 -       }
8195 -#ifdef DEBUG
8196 -       print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8197 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8198 -                      desc_bytes(desc), 1);
8199 -#endif
8200 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8201 +                                 false);
8202 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8203 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8204  
8205         /*
8206          * Job Descriptor and Shared Descriptors
8207          * must all fit into the 64-word Descriptor h/w Buffer
8208          */
8209 -       keys_fit_inline = false;
8210 -       if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8211 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8212 -               keys_fit_inline = true;
8213 +       if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8214 +               ctx->cdata.key_inline = true;
8215 +               ctx->cdata.key_virt = ctx->key;
8216 +       } else {
8217 +               ctx->cdata.key_inline = false;
8218 +               ctx->cdata.key_dma = ctx->key_dma;
8219 +       }
8220  
8221         desc = ctx->sh_desc_dec;
8222 -
8223 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8224 -
8225 -       /* Skip key loading if it is loaded due to sharing */
8226 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8227 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8228 -       if (keys_fit_inline)
8229 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8230 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8231 -       else
8232 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8233 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8234 -       set_jump_tgt_here(desc, key_jump_cmd);
8235 -
8236 -       /* Class 1 operation */
8237 -       append_operation(desc, ctx->class1_alg_type |
8238 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8239 -
8240 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8241 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8242 -
8243 -       /* Read assoc data */
8244 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8245 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8246 -
8247 -       /* Skip IV */
8248 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8249 -
8250 -       /* Will read cryptlen bytes */
8251 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8252 -
8253 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8254 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8255 -
8256 -       /* Skip assoc data */
8257 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8258 -
8259 -       /* Will write cryptlen bytes */
8260 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8261 -
8262 -       /* Store payload data */
8263 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8264 -
8265 -       /* Read encrypted data */
8266 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8267 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8268 -
8269 -       /* Read ICV */
8270 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8271 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8272 -
8273 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8274 -                                             desc_bytes(desc),
8275 -                                             DMA_TO_DEVICE);
8276 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8277 -               dev_err(jrdev, "unable to map shared descriptor\n");
8278 -               return -ENOMEM;
8279 -       }
8280 -#ifdef DEBUG
8281 -       print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8282 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8283 -                      desc_bytes(desc), 1);
8284 -#endif
8285 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8286 +                                 false);
8287 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8288 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8289  
8290         return 0;
8291  }
8292 @@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
8293  {
8294         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8295         struct device *jrdev = ctx->jrdev;
8296 -       bool keys_fit_inline = false;
8297 -       u32 *key_jump_cmd;
8298 -       u32 *read_move_cmd, *write_move_cmd;
8299 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8300         u32 *desc;
8301 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8302 +                       ctx->cdata.keylen;
8303  
8304 -       if (!ctx->enckeylen || !ctx->authsize)
8305 +       if (!ctx->cdata.keylen || !ctx->authsize)
8306                 return 0;
8307  
8308         /*
8309 @@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
8310          * Job Descriptor and Shared Descriptor
8311          * must fit into the 64-word Descriptor h/w Buffer
8312          */
8313 -       if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8314 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8315 -               keys_fit_inline = true;
8316 -
8317 -       desc = ctx->sh_desc_enc;
8318 -
8319 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8320 -
8321 -       /* Skip key loading if it is loaded due to sharing */
8322 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8323 -                                  JUMP_COND_SHRD);
8324 -       if (keys_fit_inline)
8325 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8326 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8327 -       else
8328 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8329 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8330 -       set_jump_tgt_here(desc, key_jump_cmd);
8331 -
8332 -       /* Class 1 operation */
8333 -       append_operation(desc, ctx->class1_alg_type |
8334 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8335 -
8336 -       /* assoclen + cryptlen = seqinlen */
8337 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8338 -
8339 -       /*
8340 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8341 -        * thus need to do some magic, i.e. self-patch the descriptor
8342 -        * buffer.
8343 -        */
8344 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8345 -                                   (0x6 << MOVE_LEN_SHIFT));
8346 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8347 -                                    (0x8 << MOVE_LEN_SHIFT));
8348 -
8349 -       /* Will read assoclen + cryptlen bytes */
8350 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8351 -
8352 -       /* Will write assoclen + cryptlen bytes */
8353 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8354 -
8355 -       /* Read and write assoclen + cryptlen bytes */
8356 -       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8357 -
8358 -       set_move_tgt_here(desc, read_move_cmd);
8359 -       set_move_tgt_here(desc, write_move_cmd);
8360 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8361 -       /* Move payload data to OFIFO */
8362 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8363 -
8364 -       /* Write ICV */
8365 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8366 -                        LDST_SRCDST_BYTE_CONTEXT);
8367 -
8368 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8369 -                                             desc_bytes(desc),
8370 -                                             DMA_TO_DEVICE);
8371 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8372 -               dev_err(jrdev, "unable to map shared descriptor\n");
8373 -               return -ENOMEM;
8374 +       if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8375 +               ctx->cdata.key_inline = true;
8376 +               ctx->cdata.key_virt = ctx->key;
8377 +       } else {
8378 +               ctx->cdata.key_inline = false;
8379 +               ctx->cdata.key_dma = ctx->key_dma;
8380         }
8381 -#ifdef DEBUG
8382 -       print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8383 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8384 -                      desc_bytes(desc), 1);
8385 -#endif
8386 +
8387 +       desc = ctx->sh_desc_enc;
8388 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8389 +                                 false);
8390 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8391 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8392  
8393         /*
8394          * Job Descriptor and Shared Descriptors
8395          * must all fit into the 64-word Descriptor h/w Buffer
8396          */
8397 -       keys_fit_inline = false;
8398 -       if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8399 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8400 -               keys_fit_inline = true;
8401 +       if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8402 +               ctx->cdata.key_inline = true;
8403 +               ctx->cdata.key_virt = ctx->key;
8404 +       } else {
8405 +               ctx->cdata.key_inline = false;
8406 +               ctx->cdata.key_dma = ctx->key_dma;
8407 +       }
8408  
8409         desc = ctx->sh_desc_dec;
8410 -
8411 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8412 -
8413 -       /* Skip key loading if it is loaded due to sharing */
8414 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8415 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8416 -       if (keys_fit_inline)
8417 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8418 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8419 -       else
8420 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8421 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8422 -       set_jump_tgt_here(desc, key_jump_cmd);
8423 -
8424 -       /* Class 1 operation */
8425 -       append_operation(desc, ctx->class1_alg_type |
8426 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8427 -
8428 -       /* assoclen + cryptlen = seqoutlen */
8429 -       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8430 -
8431 -       /*
8432 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8433 -        * thus need to do some magic, i.e. self-patch the descriptor
8434 -        * buffer.
8435 -        */
8436 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8437 -                                   (0x6 << MOVE_LEN_SHIFT));
8438 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8439 -                                    (0x8 << MOVE_LEN_SHIFT));
8440 -
8441 -       /* Will read assoclen + cryptlen bytes */
8442 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8443 -
8444 -       /* Will write assoclen + cryptlen bytes */
8445 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8446 -
8447 -       /* Store payload data */
8448 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8449 -
8450 -       /* In-snoop assoclen + cryptlen data */
8451 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8452 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8453 -
8454 -       set_move_tgt_here(desc, read_move_cmd);
8455 -       set_move_tgt_here(desc, write_move_cmd);
8456 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8457 -       /* Move payload data to OFIFO */
8458 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8459 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8460 -
8461 -       /* Read ICV */
8462 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8463 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8464 -
8465 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8466 -                                             desc_bytes(desc),
8467 -                                             DMA_TO_DEVICE);
8468 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8469 -               dev_err(jrdev, "unable to map shared descriptor\n");
8470 -               return -ENOMEM;
8471 -       }
8472 -#ifdef DEBUG
8473 -       print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8474 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8475 -                      desc_bytes(desc), 1);
8476 -#endif
8477 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8478 +                                 false);
8479 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8480 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8481  
8482         return 0;
8483  }
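
Note the literal false closing every cnstr_shdsc_*() call in this file. Judging from the caamalg_desc.h API this patch adds, that trailing flag selects the Queue Interface framing of the descriptor, which the job-ring driver here never needs; the QI front end (caamalg_qi.c) would pass true. A sketch of the two call styles (the QI call site is an assumption, not quoted from these hunks):

	/* job ring (this file): plain shared descriptor */
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);

	/* QI front end (caamalg_qi.c): same builder, QI framing
	 * (assumption based on the caamalg_desc.h prototypes) */
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
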
8484 @@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
8485         return 0;
8486  }
8487  
8488 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8489 -                             u32 authkeylen)
8490 -{
8491 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8492 -                              ctx->split_key_pad_len, key_in, authkeylen,
8493 -                              ctx->alg_op);
8494 -}
8495 -
8496  static int aead_setkey(struct crypto_aead *aead,
8497                                const u8 *key, unsigned int keylen)
8498  {
8499 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8500 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8501         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8502         struct device *jrdev = ctx->jrdev;
8503         struct crypto_authenc_keys keys;
8504 @@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aead *aead,
8505         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8506                 goto badkey;
8507  
8508 -       /* Pick class 2 key length from algorithm submask */
8509 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8510 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
8511 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8512 -
8513 -       if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8514 -               goto badkey;
8515 -
8516  #ifdef DEBUG
8517         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8518                keys.authkeylen + keys.enckeylen, keys.enckeylen,
8519                keys.authkeylen);
8520 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8521 -              ctx->split_key_len, ctx->split_key_pad_len);
8522         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8523                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8524  #endif
8525  
8526 -       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8527 +       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8528 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
8529 +                           keys.enckeylen);
8530         if (ret) {
8531                 goto badkey;
8532         }
8533  
8534         /* postpend encryption key to auth split key */
8535 -       memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8536 -
8537 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8538 -                                     keys.enckeylen, DMA_TO_DEVICE);
8539 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8540 -               dev_err(jrdev, "unable to map key i/o memory\n");
8541 -               return -ENOMEM;
8542 -       }
8543 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8544 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8545 +                                  keys.enckeylen, DMA_TO_DEVICE);
8546  #ifdef DEBUG
8547         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8548                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8549 -                      ctx->split_key_pad_len + keys.enckeylen, 1);
8550 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
8551  #endif
8552 -
8553 -       ctx->enckeylen = keys.enckeylen;
8554 -
8555 -       ret = aead_set_sh_desc(aead);
8556 -       if (ret) {
8557 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8558 -                                keys.enckeylen, DMA_TO_DEVICE);
8559 -       }
8560 -
8561 -       return ret;
8562 +       ctx->cdata.keylen = keys.enckeylen;
8563 +       return aead_set_sh_desc(aead);
8564  badkey:
8565         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8566         return -EINVAL;
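
After the new aead_setkey() above, ctx->key holds the MDHA split key padded to adata.keylen_pad (gen_split_key() now fills in the adata lengths itself, replacing the deleted mdpadlen[] lookup), immediately followed by the encryption key, and the whole buffer is published to the device with a single sync. A hypothetical helper showing just that tail, with names taken from the hunk:

	static void aead_publish_key(struct caam_ctx *ctx,
				     struct crypto_authenc_keys *keys)
	{
		/* encryption key sits right after the padded split key */
		memcpy(ctx->key + ctx->adata.keylen_pad, keys->enckey,
		       keys->enckeylen);
		dma_sync_single_for_device(ctx->jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys->enckeylen, DMA_TO_DEVICE);
		ctx->cdata.keylen = keys->enckeylen;
	}
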
8567 @@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead *aead,
8568  {
8569         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8570         struct device *jrdev = ctx->jrdev;
8571 -       int ret = 0;
8572  
8573  #ifdef DEBUG
8574         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8575 @@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead *aead,
8576  #endif
8577  
8578         memcpy(ctx->key, key, keylen);
8579 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8580 -                                     DMA_TO_DEVICE);
8581 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8582 -               dev_err(jrdev, "unable to map key i/o memory\n");
8583 -               return -ENOMEM;
8584 -       }
8585 -       ctx->enckeylen = keylen;
8586 -
8587 -       ret = gcm_set_sh_desc(aead);
8588 -       if (ret) {
8589 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8590 -                                DMA_TO_DEVICE);
8591 -       }
8592 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8593 +       ctx->cdata.keylen = keylen;
8594  
8595 -       return ret;
8596 +       return gcm_set_sh_desc(aead);
8597  }
8598  
8599  static int rfc4106_setkey(struct crypto_aead *aead,
8600 @@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
8601  {
8602         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8603         struct device *jrdev = ctx->jrdev;
8604 -       int ret = 0;
8605  
8606         if (keylen < 4)
8607                 return -EINVAL;
8608 @@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_aead *aead,
8609          * The last four bytes of the key material are used as the salt value
8610          * in the nonce. Update the AES key length.
8611          */
8612 -       ctx->enckeylen = keylen - 4;
8613 -
8614 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8615 -                                     DMA_TO_DEVICE);
8616 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8617 -               dev_err(jrdev, "unable to map key i/o memory\n");
8618 -               return -ENOMEM;
8619 -       }
8620 -
8621 -       ret = rfc4106_set_sh_desc(aead);
8622 -       if (ret) {
8623 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8624 -                                DMA_TO_DEVICE);
8625 -       }
8626 -
8627 -       return ret;
8628 +       ctx->cdata.keylen = keylen - 4;
8629 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8630 +                                  DMA_TO_DEVICE);
8631 +       return rfc4106_set_sh_desc(aead);
8632  }
8633  
8634  static int rfc4543_setkey(struct crypto_aead *aead,
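
rfc4106_setkey() above and rfc4543_setkey() below treat the last four bytes of the key material as the salt for the GCM nonce: the full blob is kept in ctx->key, but only the AES-key prefix counts toward cdata.keylen and is re-synced to the device. A hypothetical helper mirroring both paths:

	static int rfc4106_like_setkey(struct caam_ctx *ctx, const u8 *key,
				       unsigned int keylen)
	{
		if (keylen < 4)
			return -EINVAL;

		memcpy(ctx->key, key, keylen);
		/* the 4 trailing salt bytes stay in ctx->key; only the
		 * AES key portion is pushed to the device here */
		ctx->cdata.keylen = keylen - 4;
		dma_sync_single_for_device(ctx->jrdev, ctx->key_dma,
					   ctx->cdata.keylen, DMA_TO_DEVICE);
		return 0;
	}
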
8635 @@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
8636  {
8637         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8638         struct device *jrdev = ctx->jrdev;
8639 -       int ret = 0;
8640  
8641         if (keylen < 4)
8642                 return -EINVAL;
8643 @@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_aead *aead,
8644          * The last four bytes of the key material are used as the salt value
8645          * in the nonce. Update the AES key length.
8646          */
8647 -       ctx->enckeylen = keylen - 4;
8648 -
8649 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8650 -                                     DMA_TO_DEVICE);
8651 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8652 -               dev_err(jrdev, "unable to map key i/o memory\n");
8653 -               return -ENOMEM;
8654 -       }
8655 -
8656 -       ret = rfc4543_set_sh_desc(aead);
8657 -       if (ret) {
8658 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8659 -                                DMA_TO_DEVICE);
8660 -       }
8661 -
8662 -       return ret;
8663 +       ctx->cdata.keylen = keylen - 4;
8664 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8665 +                                  DMA_TO_DEVICE);
8666 +       return rfc4543_set_sh_desc(aead);
8667  }
8668  
8669  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8670                              const u8 *key, unsigned int keylen)
8671  {
8672         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8673 -       struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8674         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8675         const char *alg_name = crypto_tfm_alg_name(tfm);
8676         struct device *jrdev = ctx->jrdev;
8677 -       int ret = 0;
8678 -       u32 *key_jump_cmd;
8679 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8680         u32 *desc;
8681 -       u8 *nonce;
8682 -       u32 geniv;
8683         u32 ctx1_iv_off = 0;
8684 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8685 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8686                                OP_ALG_AAI_CTR_MOD128);
8687         const bool is_rfc3686 = (ctr_mode &&
8688                                  (strstr(alg_name, "rfc3686") != NULL));
8689  
8690 +       memcpy(ctx->key, key, keylen);
8691  #ifdef DEBUG
8692         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8693                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8694 @@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8695                 keylen -= CTR_RFC3686_NONCE_SIZE;
8696         }
8697  
8698 -       memcpy(ctx->key, key, keylen);
8699 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8700 -                                     DMA_TO_DEVICE);
8701 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8702 -               dev_err(jrdev, "unable to map key i/o memory\n");
8703 -               return -ENOMEM;
8704 -       }
8705 -       ctx->enckeylen = keylen;
8706 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8707 +       ctx->cdata.keylen = keylen;
8708 +       ctx->cdata.key_virt = ctx->key;
8709 +       ctx->cdata.key_inline = true;
8710  
8711         /* ablkcipher_encrypt shared descriptor */
8712         desc = ctx->sh_desc_enc;
8713 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8714 -       /* Skip if already shared */
8715 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8716 -                                  JUMP_COND_SHRD);
8717 -
8718 -       /* Load class1 key only */
8719 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8720 -                         ctx->enckeylen, CLASS_1 |
8721 -                         KEY_DEST_CLASS_REG);
8722 -
8723 -       /* Load nonce into CONTEXT1 reg */
8724 -       if (is_rfc3686) {
8725 -               nonce = (u8 *)key + keylen;
8726 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8727 -                                  LDST_CLASS_IND_CCB |
8728 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8729 -               append_move(desc, MOVE_WAITCOMP |
8730 -                           MOVE_SRC_OUTFIFO |
8731 -                           MOVE_DEST_CLASS1CTX |
8732 -                           (16 << MOVE_OFFSET_SHIFT) |
8733 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8734 -       }
8735 -
8736 -       set_jump_tgt_here(desc, key_jump_cmd);
8737 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8738 +                                    ctx1_iv_off);
8739 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8740 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8741  
8742 -       /* Load iv */
8743 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8744 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8745 -
8746 -       /* Load counter into CONTEXT1 reg */
8747 -       if (is_rfc3686)
8748 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8749 -                                    LDST_SRCDST_BYTE_CONTEXT |
8750 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8751 -                                     LDST_OFFSET_SHIFT));
8752 -
8753 -       /* Load operation */
8754 -       append_operation(desc, ctx->class1_alg_type |
8755 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8756 -
8757 -       /* Perform operation */
8758 -       ablkcipher_append_src_dst(desc);
8759 -
8760 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8761 -                                             desc_bytes(desc),
8762 -                                             DMA_TO_DEVICE);
8763 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8764 -               dev_err(jrdev, "unable to map shared descriptor\n");
8765 -               return -ENOMEM;
8766 -       }
8767 -#ifdef DEBUG
8768 -       print_hex_dump(KERN_ERR,
8769 -                      "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8770 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8771 -                      desc_bytes(desc), 1);
8772 -#endif
8773         /* ablkcipher_decrypt shared descriptor */
8774         desc = ctx->sh_desc_dec;
8775 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8776 +                                    ctx1_iv_off);
8777 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8778 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8779  
8780 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8781 -       /* Skip if already shared */
8782 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8783 -                                  JUMP_COND_SHRD);
8784 -
8785 -       /* Load class1 key only */
8786 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8787 -                         ctx->enckeylen, CLASS_1 |
8788 -                         KEY_DEST_CLASS_REG);
8789 -
8790 -       /* Load nonce into CONTEXT1 reg */
8791 -       if (is_rfc3686) {
8792 -               nonce = (u8 *)key + keylen;
8793 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8794 -                                  LDST_CLASS_IND_CCB |
8795 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8796 -               append_move(desc, MOVE_WAITCOMP |
8797 -                           MOVE_SRC_OUTFIFO |
8798 -                           MOVE_DEST_CLASS1CTX |
8799 -                           (16 << MOVE_OFFSET_SHIFT) |
8800 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8801 -       }
8802 -
8803 -       set_jump_tgt_here(desc, key_jump_cmd);
8804 -
8805 -       /* load IV */
8806 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8807 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8808 -
8809 -       /* Load counter into CONTEXT1 reg */
8810 -       if (is_rfc3686)
8811 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8812 -                                    LDST_SRCDST_BYTE_CONTEXT |
8813 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8814 -                                     LDST_OFFSET_SHIFT));
8815 -
8816 -       /* Choose operation */
8817 -       if (ctr_mode)
8818 -               append_operation(desc, ctx->class1_alg_type |
8819 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8820 -       else
8821 -               append_dec_op1(desc, ctx->class1_alg_type);
8822 -
8823 -       /* Perform operation */
8824 -       ablkcipher_append_src_dst(desc);
8825 -
8826 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8827 -                                             desc_bytes(desc),
8828 -                                             DMA_TO_DEVICE);
8829 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8830 -               dev_err(jrdev, "unable to map shared descriptor\n");
8831 -               return -ENOMEM;
8832 -       }
8833 -
8834 -#ifdef DEBUG
8835 -       print_hex_dump(KERN_ERR,
8836 -                      "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8837 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8838 -                      desc_bytes(desc), 1);
8839 -#endif
8840         /* ablkcipher_givencrypt shared descriptor */
8841         desc = ctx->sh_desc_givenc;
8842 +       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8843 +                                       ctx1_iv_off);
8844 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8845 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8846  
8847 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8848 -       /* Skip if already shared */
8849 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8850 -                                  JUMP_COND_SHRD);
8851 -
8852 -       /* Load class1 key only */
8853 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8854 -                         ctx->enckeylen, CLASS_1 |
8855 -                         KEY_DEST_CLASS_REG);
8856 -
8857 -       /* Load Nonce into CONTEXT1 reg */
8858 -       if (is_rfc3686) {
8859 -               nonce = (u8 *)key + keylen;
8860 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8861 -                                  LDST_CLASS_IND_CCB |
8862 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8863 -               append_move(desc, MOVE_WAITCOMP |
8864 -                           MOVE_SRC_OUTFIFO |
8865 -                           MOVE_DEST_CLASS1CTX |
8866 -                           (16 << MOVE_OFFSET_SHIFT) |
8867 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8868 -       }
8869 -       set_jump_tgt_here(desc, key_jump_cmd);
8870 -
8871 -       /* Generate IV */
8872 -       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8873 -               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8874 -               NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8875 -       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8876 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8877 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8878 -       append_move(desc, MOVE_WAITCOMP |
8879 -                   MOVE_SRC_INFIFO |
8880 -                   MOVE_DEST_CLASS1CTX |
8881 -                   (crt->ivsize << MOVE_LEN_SHIFT) |
8882 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8883 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8884 -
8885 -       /* Copy generated IV to memory */
8886 -       append_seq_store(desc, crt->ivsize,
8887 -                        LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8888 -                        (ctx1_iv_off << LDST_OFFSET_SHIFT));
8889 -
8890 -       /* Load Counter into CONTEXT1 reg */
8891 -       if (is_rfc3686)
8892 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8893 -                                    LDST_SRCDST_BYTE_CONTEXT |
8894 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8895 -                                     LDST_OFFSET_SHIFT));
8896 -
8897 -       if (ctx1_iv_off)
8898 -               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8899 -                           (1 << JUMP_OFFSET_SHIFT));
8900 -
8901 -       /* Load operation */
8902 -       append_operation(desc, ctx->class1_alg_type |
8903 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8904 -
8905 -       /* Perform operation */
8906 -       ablkcipher_append_src_dst(desc);
8907 -
8908 -       ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8909 -                                                desc_bytes(desc),
8910 -                                                DMA_TO_DEVICE);
8911 -       if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8912 -               dev_err(jrdev, "unable to map shared descriptor\n");
8913 -               return -ENOMEM;
8914 -       }
8915 -#ifdef DEBUG
8916 -       print_hex_dump(KERN_ERR,
8917 -                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8918 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8919 -                      desc_bytes(desc), 1);
8920 -#endif
8921 -
8922 -       return ret;
8923 +       return 0;
8924  }
8925  
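For reference, the refactored ablkcipher setkey path above reduces to the
outline below, a simplified sketch (the function name is illustrative; error
handling and the givencrypt descriptor are omitted). Descriptor construction
moves into the shared cnstr_shdsc_* helpers, and the pre-mapped descriptor
buffers are handed back to the device with dma_sync_single_for_device():

    /* Sketch only: assumes sh_desc_*_dma were DMA-mapped once at init. */
    static int ablkcipher_setkey_outline(struct caam_ctx *ctx,
                                         struct device *jrdev,
                                         unsigned int ivsize,
                                         bool is_rfc3686, u32 ctx1_iv_off)
    {
            u32 *desc = ctx->sh_desc_enc;

            /* encrypt shared descriptor: build, then push to device */
            cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
                                         is_rfc3686, ctx1_iv_off);
            dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
                                       desc_bytes(desc), DMA_TO_DEVICE);

            /* decrypt shared descriptor: same pattern */
            desc = ctx->sh_desc_dec;
            cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
                                         is_rfc3686, ctx1_iv_off);
            dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
                                       desc_bytes(desc), DMA_TO_DEVICE);

            return 0;
    }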
8926  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8927 @@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8928  {
8929         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8930         struct device *jrdev = ctx->jrdev;
8931 -       u32 *key_jump_cmd, *desc;
8932 -       __be64 sector_size = cpu_to_be64(512);
8933 +       u32 *desc;
8934  
8935         if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
8936                 crypto_ablkcipher_set_flags(ablkcipher,
8937 @@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8938         }
8939  
8940         memcpy(ctx->key, key, keylen);
8941 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8942 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8943 -               dev_err(jrdev, "unable to map key i/o memory\n");
8944 -               return -ENOMEM;
8945 -       }
8946 -       ctx->enckeylen = keylen;
8947 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8948 +       ctx->cdata.keylen = keylen;
8949 +       ctx->cdata.key_virt = ctx->key;
8950 +       ctx->cdata.key_inline = true;
8951  
8952         /* xts_ablkcipher_encrypt shared descriptor */
8953         desc = ctx->sh_desc_enc;
8954 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8955 -       /* Skip if already shared */
8956 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8957 -                                  JUMP_COND_SHRD);
8958 -
8959 -       /* Load class1 keys only */
8960 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8961 -                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8962 -
8963 -       /* Load sector size with index 40 bytes (0x28) */
8964 -       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8965 -                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8966 -       append_data(desc, (void *)&sector_size, 8);
8967 -
8968 -       set_jump_tgt_here(desc, key_jump_cmd);
8969 -
8970 -       /*
8971 -        * create sequence for loading the sector index
8972 -        * Upper 8B of IV - will be used as sector index
8973 -        * Lower 8B of IV - will be discarded
8974 -        */
8975 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8976 -                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8977 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8978 -
8979 -       /* Load operation */
8980 -       append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
8981 -                        OP_ALG_ENCRYPT);
8982 -
8983 -       /* Perform operation */
8984 -       ablkcipher_append_src_dst(desc);
8985 -
8986 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
8987 -                                             DMA_TO_DEVICE);
8988 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8989 -               dev_err(jrdev, "unable to map shared descriptor\n");
8990 -               return -ENOMEM;
8991 -       }
8992 -#ifdef DEBUG
8993 -       print_hex_dump(KERN_ERR,
8994 -                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
8995 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8996 -#endif
8997 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
8998 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8999 +                                  desc_bytes(desc), DMA_TO_DEVICE);
9000  
9001         /* xts_ablkcipher_decrypt shared descriptor */
9002         desc = ctx->sh_desc_dec;
9003 -
9004 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
9005 -       /* Skip if already shared */
9006 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
9007 -                                  JUMP_COND_SHRD);
9008 -
9009 -       /* Load class1 key only */
9010 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
9011 -                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9012 -
9013 -       /* Load sector size with index 40 bytes (0x28) */
9014 -       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9015 -                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9016 -       append_data(desc, (void *)&sector_size, 8);
9017 -
9018 -       set_jump_tgt_here(desc, key_jump_cmd);
9019 -
9020 -       /*
9021 -        * create sequence for loading the sector index
9022 -        * Upper 8B of IV - will be used as sector index
9023 -        * Lower 8B of IV - will be discarded
9024 -        */
9025 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9026 -                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9027 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9028 -
9029 -       /* Load operation */
9030 -       append_dec_op1(desc, ctx->class1_alg_type);
9031 -
9032 -       /* Perform operation */
9033 -       ablkcipher_append_src_dst(desc);
9034 -
9035 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9036 -                                             DMA_TO_DEVICE);
9037 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9038 -               dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9039 -                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9040 -               dev_err(jrdev, "unable to map shared descriptor\n");
9041 -               return -ENOMEM;
9042 -       }
9043 -#ifdef DEBUG
9044 -       print_hex_dump(KERN_ERR,
9045 -                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9046 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9047 -#endif
9048 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9049 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9050 +                                  desc_bytes(desc), DMA_TO_DEVICE);
9051  
9052         return 0;
9053  }
9054  
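The xts_ablkcipher_setkey hunk above follows the same scheme: ctx->key_dma and
the shared-descriptor DMA addresses are no longer mapped on every key change,
only synchronized. A minimal sketch of this "map once, sync per update"
pattern, with illustrative names, assuming a device-readable buffer that is
mapped a single time at init:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    /* Map the buffer once, e.g. from the tfm init hook. */
    static int buf_init(struct device *dev, void *buf, size_t len,
                        dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            return 0;
    }

    /* Each update rewrites the CPU copy and returns ownership to the
     * device; no mapping (and no mapping-error path) in setkey itself. */
    static void buf_update(struct device *dev, dma_addr_t handle,
                           void *buf, const void *src, size_t len)
    {
            memcpy(buf, src, len);
            dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
    }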
9055  /*
9056   * aead_edesc - s/w-extended aead descriptor
9057 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9058 - * @src_nents: number of segments in input scatterlist
9059 - * @dst_nents: number of segments in output scatterlist
9060 - * @iv_dma: dma address of iv for checking continuity and link table
9061 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9062 + * @src_nents: number of segments in input s/w scatterlist
9063 + * @dst_nents: number of segments in output s/w scatterlist
9064   * @sec4_sg_bytes: length of dma mapped sec4_sg space
9065   * @sec4_sg_dma: bus physical mapped address of h/w link table
9066 + * @sec4_sg: pointer to h/w link table
9067   * @hw_desc: the h/w job descriptor followed by any referenced link tables
9068   */
9069  struct aead_edesc {
9070 -       int assoc_nents;
9071         int src_nents;
9072         int dst_nents;
9073 -       dma_addr_t iv_dma;
9074         int sec4_sg_bytes;
9075         dma_addr_t sec4_sg_dma;
9076         struct sec4_sg_entry *sec4_sg;
9077 @@ -1899,12 +739,12 @@ struct aead_edesc {
9078  
9079  /*
9080   * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9081 - * @src_nents: number of segments in input scatterlist
9082 - * @dst_nents: number of segments in output scatterlist
9083 + * @src_nents: number of segments in input s/w scatterlist
9084 + * @dst_nents: number of segments in output s/w scatterlist
9085   * @iv_dma: dma address of iv for checking continuity and link table
9086 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9087   * @sec4_sg_bytes: length of dma mapped sec4_sg space
9088   * @sec4_sg_dma: bus physical mapped address of h/w link table
9089 + * @sec4_sg: pointer to h/w link table
9090   * @hw_desc: the h/w job descriptor followed by any referenced link tables
9091   */
9092  struct ablkcipher_edesc {
9093 @@ -1924,10 +764,11 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
9094                        int sec4_sg_bytes)
9095  {
9096         if (dst != src) {
9097 -               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9098 -               dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9099 +               if (src_nents)
9100 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9101 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9102         } else {
9103 -               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9104 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9105         }
9106  
9107         if (iv_dma)
9108 @@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
9109         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9110  #endif
9111  
9112 -       edesc = (struct ablkcipher_edesc *)((char *)desc -
9113 -                offsetof(struct ablkcipher_edesc, hw_desc));
9114 +       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9115  
9116         if (err)
9117                 caam_jr_strstatus(jrdev, err);
9118 @@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
9119         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
9120                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9121                        edesc->src_nents > 1 ? 100 : ivsize, 1);
9122 -       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
9123 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9124 -                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9125  #endif
9126 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
9127 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9128 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9129  
9130         ablkcipher_unmap(jrdev, edesc, req);
9131  
9132 @@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
9133         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9134  #endif
9135  
9136 -       edesc = (struct ablkcipher_edesc *)((char *)desc -
9137 -                offsetof(struct ablkcipher_edesc, hw_desc));
9138 +       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9139         if (err)
9140                 caam_jr_strstatus(jrdev, err);
9141  
9142 @@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
9143         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
9144                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9145                        ivsize, 1);
9146 -       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
9147 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9148 -                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9149  #endif
9150 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
9151 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9152 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9153  
9154         ablkcipher_unmap(jrdev, edesc, req);
9155  
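The completion callbacks above switch from open-coded offsetof() arithmetic to
container_of(), which recovers the enclosing extended descriptor from the
hw_desc pointer the job ring hands back. The two forms are equivalent; a
minimal illustration (the helper name is hypothetical):

    #include <linux/kernel.h>

    static struct ablkcipher_edesc *edesc_of(u32 *desc)
    {
            /* same as: (struct ablkcipher_edesc *)((char *)desc -
             *     offsetof(struct ablkcipher_edesc, hw_desc[0])); */
            return container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
    }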
9156 @@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_request *req,
9157         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9158  
9159         if (all_contig) {
9160 -               src_dma = sg_dma_address(req->src);
9161 +               src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9162                 in_options = 0;
9163         } else {
9164                 src_dma = edesc->sec4_sg_dma;
9165 @@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_request *req,
9166         out_options = in_options;
9167  
9168         if (unlikely(req->src != req->dst)) {
9169 -               if (!edesc->dst_nents) {
9170 +               if (edesc->dst_nents == 1) {
9171                         dst_dma = sg_dma_address(req->dst);
9172                 } else {
9173                         dst_dma = edesc->sec4_sg_dma +
9174 @@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_request *req,
9175                          FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9176         /* Append Salt */
9177         if (!generic_gcm)
9178 -               append_data(desc, ctx->key + ctx->enckeylen, 4);
9179 +               append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9180         /* Append IV */
9181         append_data(desc, req->iv, ivsize);
9182         /* End of blank commands */
9183 @@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead_request *req,
9184                                                  struct caam_aead_alg, aead);
9185         unsigned int ivsize = crypto_aead_ivsize(aead);
9186         struct caam_ctx *ctx = crypto_aead_ctx(aead);
9187 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9188 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9189                                OP_ALG_AAI_CTR_MOD128);
9190         const bool is_rfc3686 = alg->caam.rfc3686;
9191         u32 *desc = edesc->hw_desc;
9192 @@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9193         int len, sec4_sg_index = 0;
9194  
9195  #ifdef DEBUG
9196 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9197 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9198         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9199                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9200                        ivsize, 1);
9201 -       printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9202 -       dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
9203 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9204 -                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9205 +       pr_err("asked=%d, nbytes=%d\n",

9206 +              (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9207  #endif
9208 +       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
9209 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9210 +                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9211  
9212         len = desc_len(sh_desc);
9213         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9214 @@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9215         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9216  
9217         if (likely(req->src == req->dst)) {
9218 -               if (!edesc->src_nents && iv_contig) {
9219 +               if (edesc->src_nents == 1 && iv_contig) {
9220                         dst_dma = sg_dma_address(req->src);
9221                 } else {
9222                         dst_dma = edesc->sec4_sg_dma +
9223 @@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
9224                         out_options = LDST_SGF;
9225                 }
9226         } else {
9227 -               if (!edesc->dst_nents) {
9228 +               if (edesc->dst_nents == 1) {
9229                         dst_dma = sg_dma_address(req->dst);
9230                 } else {
9231                         dst_dma = edesc->sec4_sg_dma +
9232 @@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
9233         int len, sec4_sg_index = 0;
9234  
9235  #ifdef DEBUG
9236 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9237 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9238         print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9239                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9240                        ivsize, 1);
9241 -       dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
9242 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9243 -                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9244  #endif
9245 +       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
9246 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9247 +                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9248  
9249         len = desc_len(sh_desc);
9250         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9251  
9252 -       if (!edesc->src_nents) {
9253 +       if (edesc->src_nents == 1) {
9254                 src_dma = sg_dma_address(req->src);
9255                 in_options = 0;
9256         } else {
9257 @@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
9258         struct crypto_aead *aead = crypto_aead_reqtfm(req);
9259         struct caam_ctx *ctx = crypto_aead_ctx(aead);
9260         struct device *jrdev = ctx->jrdev;
9261 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9262 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9263 -       int src_nents, dst_nents = 0;
9264 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9265 +                      GFP_KERNEL : GFP_ATOMIC;
9266 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9267         struct aead_edesc *edesc;
9268 -       int sgc;
9269 -       bool all_contig = true;
9270 -       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9271 +       int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9272         unsigned int authsize = ctx->authsize;
9273  
9274         if (unlikely(req->dst != req->src)) {
9275 -               src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9276 -               dst_nents = sg_count(req->dst,
9277 -                                    req->assoclen + req->cryptlen +
9278 -                                       (encrypt ? authsize : (-authsize)));
9279 -       } else {
9280 -               src_nents = sg_count(req->src,
9281 -                                    req->assoclen + req->cryptlen +
9282 -                                       (encrypt ? authsize : 0));
9283 -       }
9284 -
9285 -       /* Check if data are contiguous. */
9286 -       all_contig = !src_nents;
9287 -       if (!all_contig) {
9288 -               src_nents = src_nents ? : 1;
9289 -               sec4_sg_len = src_nents;
9290 -       }
9291 -
9292 -       sec4_sg_len += dst_nents;
9293 -
9294 -       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9295 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
9296 +                                            req->cryptlen);
9297 +               if (unlikely(src_nents < 0)) {
9298 +                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9299 +                               req->assoclen + req->cryptlen);
9300 +                       return ERR_PTR(src_nents);
9301 +               }
9302  
9303 -       /* allocate space for base edesc and hw desc commands, link tables */
9304 -       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9305 -                       GFP_DMA | flags);
9306 -       if (!edesc) {
9307 -               dev_err(jrdev, "could not allocate extended descriptor\n");
9308 -               return ERR_PTR(-ENOMEM);
9309 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9310 +                                            req->cryptlen +
9311 +                                               (encrypt ? authsize :
9312 +                                                          (-authsize)));
9313 +               if (unlikely(dst_nents < 0)) {
9314 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9315 +                               req->assoclen + req->cryptlen +
9316 +                               (encrypt ? authsize : (-authsize)));
9317 +                       return ERR_PTR(dst_nents);
9318 +               }
9319 +       } else {
9320 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
9321 +                                            req->cryptlen +
9322 +                                            (encrypt ? authsize : 0));
9323 +               if (unlikely(src_nents < 0)) {
9324 +                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9325 +                               req->assoclen + req->cryptlen +
9326 +                               (encrypt ? authsize : 0));
9327 +                       return ERR_PTR(src_nents);
9328 +               }
9329         }
9330  
9331         if (likely(req->src == req->dst)) {
9332 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9333 -                                DMA_BIDIRECTIONAL);
9334 -               if (unlikely(!sgc)) {
9335 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9336 +                                             DMA_BIDIRECTIONAL);
9337 +               if (unlikely(!mapped_src_nents)) {
9338                         dev_err(jrdev, "unable to map source\n");
9339 -                       kfree(edesc);
9340                         return ERR_PTR(-ENOMEM);
9341                 }
9342         } else {
9343 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9344 -                                DMA_TO_DEVICE);
9345 -               if (unlikely(!sgc)) {
9346 -                       dev_err(jrdev, "unable to map source\n");
9347 -                       kfree(edesc);
9348 -                       return ERR_PTR(-ENOMEM);
9349 +               /* Cover also the case of null (zero length) input data */
9350 +               if (src_nents) {
9351 +                       mapped_src_nents = dma_map_sg(jrdev, req->src,
9352 +                                                     src_nents, DMA_TO_DEVICE);
9353 +                       if (unlikely(!mapped_src_nents)) {
9354 +                               dev_err(jrdev, "unable to map source\n");
9355 +                               return ERR_PTR(-ENOMEM);
9356 +                       }
9357 +               } else {
9358 +                       mapped_src_nents = 0;
9359                 }
9360  
9361 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9362 -                                DMA_FROM_DEVICE);
9363 -               if (unlikely(!sgc)) {
9364 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9365 +                                             DMA_FROM_DEVICE);
9366 +               if (unlikely(!mapped_dst_nents)) {
9367                         dev_err(jrdev, "unable to map destination\n");
9368 -                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9369 -                                    DMA_TO_DEVICE);
9370 -                       kfree(edesc);
9371 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9372                         return ERR_PTR(-ENOMEM);
9373                 }
9374         }
9375  
9376 +       sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9377 +       sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9378 +       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9379 +
9380 +       /* allocate space for base edesc and hw desc commands, link tables */
9381 +       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9382 +                       GFP_DMA | flags);
9383 +       if (!edesc) {
9384 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9385 +                          0, 0, 0);
9386 +               return ERR_PTR(-ENOMEM);
9387 +       }
9388 +
9389         edesc->src_nents = src_nents;
9390         edesc->dst_nents = dst_nents;
9391         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9392                          desc_bytes;
9393 -       *all_contig_ptr = all_contig;
9394 +       *all_contig_ptr = !(mapped_src_nents > 1);
9395  
9396         sec4_sg_index = 0;
9397 -       if (!all_contig) {
9398 -               sg_to_sec4_sg_last(req->src, src_nents,
9399 -                             edesc->sec4_sg + sec4_sg_index, 0);
9400 -               sec4_sg_index += src_nents;
9401 +       if (mapped_src_nents > 1) {
9402 +               sg_to_sec4_sg_last(req->src, mapped_src_nents,
9403 +                                  edesc->sec4_sg + sec4_sg_index, 0);
9404 +               sec4_sg_index += mapped_src_nents;
9405         }
9406 -       if (dst_nents) {
9407 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9408 +       if (mapped_dst_nents > 1) {
9409 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9410                                    edesc->sec4_sg + sec4_sg_index, 0);
9411         }
9412  
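The allocation path above now tracks two segment counts per scatterlist: the
software count from sg_nents_for_len(), which returns a negative errno when
the list holds fewer bytes than requested, and the count dma_map_sg() actually
returns, which can be smaller if the IOMMU coalesces entries. The sec4 link
table is sized from the mapped counts, while unmapping still needs the
original ones. A sketch of the idiom (names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int map_one_list(struct device *dev, struct scatterlist *sg,
                            int len, int *mapped_nents)
    {
            int nents = sg_nents_for_len(sg, len);

            if (nents < 0)                  /* S/G list shorter than len */
                    return nents;

            *mapped_nents = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
            if (unlikely(!*mapped_nents))   /* dma_map_sg() failed */
                    return -ENOMEM;

            /* keep nents around: dma_unmap_sg() must get this value */
            return nents;
    }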
9413 @@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_request *req)
9414         u32 *desc;
9415         int ret = 0;
9416  
9417 -#ifdef DEBUG
9418 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9419 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9420 -       dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9421 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9422 -                   req->assoclen + req->cryptlen, 1, may_sleep);
9423 -#endif
9424 +       caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9425 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9426 +                    req->assoclen + req->cryptlen, 1);
9427  
9428         /* allocate extended descriptor */
9429         edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9430 @@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9431         struct device *jrdev = ctx->jrdev;
9432         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9433                        GFP_KERNEL : GFP_ATOMIC;
9434 -       int src_nents, dst_nents = 0, sec4_sg_bytes;
9435 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9436         struct ablkcipher_edesc *edesc;
9437         dma_addr_t iv_dma = 0;
9438 -       bool iv_contig = false;
9439 -       int sgc;
9440 +       bool in_contig;
9441         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9442 -       int sec4_sg_index;
9443 +       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9444  
9445 -       src_nents = sg_count(req->src, req->nbytes);
9446 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9447 +       if (unlikely(src_nents < 0)) {
9448 +               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9449 +                       req->nbytes);
9450 +               return ERR_PTR(src_nents);
9451 +       }
9452  
9453 -       if (req->dst != req->src)
9454 -               dst_nents = sg_count(req->dst, req->nbytes);
9455 +       if (req->dst != req->src) {
9456 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9457 +               if (unlikely(dst_nents < 0)) {
9458 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9459 +                               req->nbytes);
9460 +                       return ERR_PTR(dst_nents);
9461 +               }
9462 +       }
9463  
9464         if (likely(req->src == req->dst)) {
9465 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9466 -                                DMA_BIDIRECTIONAL);
9467 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9468 +                                             DMA_BIDIRECTIONAL);
9469 +               if (unlikely(!mapped_src_nents)) {
9470 +                       dev_err(jrdev, "unable to map source\n");
9471 +                       return ERR_PTR(-ENOMEM);
9472 +               }
9473         } else {
9474 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9475 -                                DMA_TO_DEVICE);
9476 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9477 -                                DMA_FROM_DEVICE);
9478 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9479 +                                             DMA_TO_DEVICE);
9480 +               if (unlikely(!mapped_src_nents)) {
9481 +                       dev_err(jrdev, "unable to map source\n");
9482 +                       return ERR_PTR(-ENOMEM);
9483 +               }
9484 +
9485 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9486 +                                             DMA_FROM_DEVICE);
9487 +               if (unlikely(!mapped_dst_nents)) {
9488 +                       dev_err(jrdev, "unable to map destination\n");
9489 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9490 +                       return ERR_PTR(-ENOMEM);
9491 +               }
9492         }
9493  
9494         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9495         if (dma_mapping_error(jrdev, iv_dma)) {
9496                 dev_err(jrdev, "unable to map IV\n");
9497 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9498 +                          0, 0, 0);
9499                 return ERR_PTR(-ENOMEM);
9500         }
9501  
9502 -       /*
9503 -        * Check if iv can be contiguous with source and destination.
9504 -        * If so, include it. If not, create scatterlist.
9505 -        */
9506 -       if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9507 -               iv_contig = true;
9508 -       else
9509 -               src_nents = src_nents ? : 1;
9510 -       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9511 -                       sizeof(struct sec4_sg_entry);
9512 +       if (mapped_src_nents == 1 &&
9513 +           iv_dma + ivsize == sg_dma_address(req->src)) {
9514 +               in_contig = true;
9515 +               sec4_sg_ents = 0;
9516 +       } else {
9517 +               in_contig = false;
9518 +               sec4_sg_ents = 1 + mapped_src_nents;
9519 +       }
9520 +       dst_sg_idx = sec4_sg_ents;
9521 +       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9522 +       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9523  
9524         /* allocate space for base edesc and hw desc commands, link tables */
9525         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9526                         GFP_DMA | flags);
9527         if (!edesc) {
9528                 dev_err(jrdev, "could not allocate extended descriptor\n");
9529 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9530 +                          iv_dma, ivsize, 0, 0);
9531                 return ERR_PTR(-ENOMEM);
9532         }
9533  
9534 @@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9535         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9536                          desc_bytes;
9537  
9538 -       sec4_sg_index = 0;
9539 -       if (!iv_contig) {
9540 +       if (!in_contig) {
9541                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9542 -               sg_to_sec4_sg_last(req->src, src_nents,
9543 +               sg_to_sec4_sg_last(req->src, mapped_src_nents,
9544                                    edesc->sec4_sg + 1, 0);
9545 -               sec4_sg_index += 1 + src_nents;
9546         }
9547  
9548 -       if (dst_nents) {
9549 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9550 -                       edesc->sec4_sg + sec4_sg_index, 0);
9551 +       if (mapped_dst_nents > 1) {
9552 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9553 +                                  edesc->sec4_sg + dst_sg_idx, 0);
9554         }
9555  
9556         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9557                                             sec4_sg_bytes, DMA_TO_DEVICE);
9558         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9559                 dev_err(jrdev, "unable to map S/G table\n");
9560 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9561 +                          iv_dma, ivsize, 0, 0);
9562 +               kfree(edesc);
9563                 return ERR_PTR(-ENOMEM);
9564         }
9565  
9566 @@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
9567                        sec4_sg_bytes, 1);
9568  #endif
9569  
9570 -       *iv_contig_out = iv_contig;
9571 +       *iv_contig_out = in_contig;
9572         return edesc;
9573  }
9574  
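The rewritten ablkcipher_edesc_alloc() above only treats the IV as contiguous
with the payload when there is exactly one mapped source segment and it starts
right where the DMA-mapped IV ends; in every other case the IV becomes the
first entry of the sec4 link table, followed by the source segments. The test
in isolation (a hypothetical helper):

    static bool iv_src_contig(dma_addr_t iv_dma, int ivsize,
                              int mapped_src_nents,
                              struct scatterlist *src)
    {
            /* hardware can then read [IV | payload] as one flat buffer */
            return mapped_src_nents == 1 &&
                   iv_dma + ivsize == sg_dma_address(src);
    }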
9575 @@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9576         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9577         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9578         struct device *jrdev = ctx->jrdev;
9579 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9580 -                                         CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9581 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9582                        GFP_KERNEL : GFP_ATOMIC;
9583 -       int src_nents, dst_nents = 0, sec4_sg_bytes;
9584 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9585         struct ablkcipher_edesc *edesc;
9586         dma_addr_t iv_dma = 0;
9587 -       bool iv_contig = false;
9588 -       int sgc;
9589 +       bool out_contig;
9590         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9591 -       int sec4_sg_index;
9592 +       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9593  
9594 -       src_nents = sg_count(req->src, req->nbytes);
9595 -
9596 -       if (unlikely(req->dst != req->src))
9597 -               dst_nents = sg_count(req->dst, req->nbytes);
9598 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9599 +       if (unlikely(src_nents < 0)) {
9600 +               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9601 +                       req->nbytes);
9602 +               return ERR_PTR(src_nents);
9603 +       }
9604  
9605         if (likely(req->src == req->dst)) {
9606 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9607 -                                DMA_BIDIRECTIONAL);
9608 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9609 +                                             DMA_BIDIRECTIONAL);
9610 +               if (unlikely(!mapped_src_nents)) {
9611 +                       dev_err(jrdev, "unable to map source\n");
9612 +                       return ERR_PTR(-ENOMEM);
9613 +               }
9614 +
9615 +               dst_nents = src_nents;
9616 +               mapped_dst_nents = src_nents;
9617         } else {
9618 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9619 -                                DMA_TO_DEVICE);
9620 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9621 -                                DMA_FROM_DEVICE);
9622 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9623 +                                             DMA_TO_DEVICE);
9624 +               if (unlikely(!mapped_src_nents)) {
9625 +                       dev_err(jrdev, "unable to map source\n");
9626 +                       return ERR_PTR(-ENOMEM);
9627 +               }
9628 +
9629 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9630 +               if (unlikely(dst_nents < 0)) {
9631 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9632 +                               req->nbytes);
9633 +                       return ERR_PTR(dst_nents);
9634 +               }
9635 +
9636 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9637 +                                             DMA_FROM_DEVICE);
9638 +               if (unlikely(!mapped_dst_nents)) {
9639 +                       dev_err(jrdev, "unable to map destination\n");
9640 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9641 +                       return ERR_PTR(-ENOMEM);
9642 +               }
9643         }
9644  
9645         /*
9646 @@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9647         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9648         if (dma_mapping_error(jrdev, iv_dma)) {
9649                 dev_err(jrdev, "unable to map IV\n");
9650 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9651 +                          0, 0, 0);
9652                 return ERR_PTR(-ENOMEM);
9653         }
9654  
9655 -       if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9656 -               iv_contig = true;
9657 -       else
9658 -               dst_nents = dst_nents ? : 1;
9659 -       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9660 -                       sizeof(struct sec4_sg_entry);
9661 +       sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9662 +       dst_sg_idx = sec4_sg_ents;
9663 +       if (mapped_dst_nents == 1 &&
9664 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
9665 +               out_contig = true;
9666 +       } else {
9667 +               out_contig = false;
9668 +               sec4_sg_ents += 1 + mapped_dst_nents;
9669 +       }
9670  
9671         /* allocate space for base edesc and hw desc commands, link tables */
9672 +       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9673         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9674                         GFP_DMA | flags);
9675         if (!edesc) {
9676                 dev_err(jrdev, "could not allocate extended descriptor\n");
9677 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9678 +                          iv_dma, ivsize, 0, 0);
9679                 return ERR_PTR(-ENOMEM);
9680         }
9681  
9682 @@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9683         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9684                          desc_bytes;
9685  
9686 -       sec4_sg_index = 0;
9687 -       if (src_nents) {
9688 -               sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9689 -               sec4_sg_index += src_nents;
9690 -       }
9691 +       if (mapped_src_nents > 1)
9692 +               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9693 +                                  0);
9694  
9695 -       if (!iv_contig) {
9696 -               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9697 +       if (!out_contig) {
9698 +               dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9699                                    iv_dma, ivsize, 0);
9700 -               sec4_sg_index += 1;
9701 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9702 -                                  edesc->sec4_sg + sec4_sg_index, 0);
9703 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9704 +                                  edesc->sec4_sg + dst_sg_idx + 1, 0);
9705         }
9706  
9707         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9708                                             sec4_sg_bytes, DMA_TO_DEVICE);
9709         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9710                 dev_err(jrdev, "unable to map S/G table\n");
9711 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9712 +                          iv_dma, ivsize, 0, 0);
9713 +               kfree(edesc);
9714                 return ERR_PTR(-ENOMEM);
9715         }
9716         edesc->iv_dma = iv_dma;
9717 @@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
9718                        sec4_sg_bytes, 1);
9719  #endif
9720  
9721 -       *iv_contig_out = iv_contig;
9722 +       *iv_contig_out = out_contig;
9723         return edesc;
9724  }
9725  
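The givencrypt allocator mirrors this on the output side: the generated IV
must immediately precede the destination, and dst_sg_idx records where the
IV-plus-destination entries start within the shared link table. A sketch of
the entry accounting used above (illustrative name):

    static int giv_sg_ents(int mapped_src_nents, int mapped_dst_nents,
                           bool out_contig, int *dst_sg_idx)
    {
            /* source needs table entries only when scattered */
            int ents = mapped_src_nents > 1 ? mapped_src_nents : 0;

            *dst_sg_idx = ents;     /* IV + dst entries begin here */
            if (!out_contig)
                    ents += 1 + mapped_dst_nents;

            return ents;            /* total sec4_sg entries to allocate */
    }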
9726 @@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
9727         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9728         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9729         struct device *jrdev = ctx->jrdev;
9730 -       bool iv_contig;
9731 +       bool iv_contig = false;
9732         u32 *desc;
9733         int ret = 0;
9734  
9735 @@ -2933,7 +1840,6 @@ struct caam_alg_template {
9736         } template_u;
9737         u32 class1_alg_type;
9738         u32 class2_alg_type;
9739 -       u32 alg_op;
9740  };
9741  
9742  static struct caam_alg_template driver_algs[] = {
9743 @@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads[] = {
9744                 .caam = {
9745                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9746                                            OP_ALG_AAI_HMAC_PRECOMP,
9747 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9748                 },
9749         },
9750         {
9751 @@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads[] = {
9752                 .caam = {
9753                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9754                                            OP_ALG_AAI_HMAC_PRECOMP,
9755 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9756                 },
9757         },
9758         {
9759 @@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads[] = {
9760                 .caam = {
9761                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9762                                            OP_ALG_AAI_HMAC_PRECOMP,
9763 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9764                 },
9765         },
9766         {
9767 @@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads[] = {
9768                 .caam = {
9769                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9770                                            OP_ALG_AAI_HMAC_PRECOMP,
9771 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9772                 },
9773         },
9774         {
9775 @@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads[] = {
9776                 .caam = {
9777                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9778                                            OP_ALG_AAI_HMAC_PRECOMP,
9779 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9780                 },
9781         },
9782         {
9783 @@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads[] = {
9784                 .caam = {
9785                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9786                                            OP_ALG_AAI_HMAC_PRECOMP,
9787 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9788                 },
9789         },
9790         {
9791 @@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads[] = {
9792                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9793                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9794                                            OP_ALG_AAI_HMAC_PRECOMP,
9795 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9796                 },
9797         },
9798         {
9799 @@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads[] = {
9800                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9801                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9802                                            OP_ALG_AAI_HMAC_PRECOMP,
9803 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9804                         .geniv = true,
9805                 },
9806         },
9807 @@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads[] = {
9808                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9809                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9810                                            OP_ALG_AAI_HMAC_PRECOMP,
9811 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9812                 },
9813         },
9814         {
9815 @@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads[] = {
9816                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9817                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9818                                            OP_ALG_AAI_HMAC_PRECOMP,
9819 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9820                         .geniv = true,
9821                 },
9822         },
9823 @@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads[] = {
9824                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9825                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9826                                            OP_ALG_AAI_HMAC_PRECOMP,
9827 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9828                 },
9829         },
9830         {
9831 @@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads[] = {
9832                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9833                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9834                                            OP_ALG_AAI_HMAC_PRECOMP,
9835 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9836                         .geniv = true,
9837                 },
9838         },
9839 @@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads[] = {
9840                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9841                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9842                                            OP_ALG_AAI_HMAC_PRECOMP,
9843 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9844                 },
9845         },
9846         {
9847 @@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads[] = {
9848                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9849                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9850                                            OP_ALG_AAI_HMAC_PRECOMP,
9851 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9852                         .geniv = true,
9853                 },
9854         },
9855 @@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads[] = {
9856                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9857                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9858                                            OP_ALG_AAI_HMAC_PRECOMP,
9859 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9860                 },
9861         },
9862         {
9863 @@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads[] = {
9864                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9865                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9866                                            OP_ALG_AAI_HMAC_PRECOMP,
9867 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9868                         .geniv = true,
9869                 },
9870         },
9871 @@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads[] = {
9872                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9873                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9874                                            OP_ALG_AAI_HMAC_PRECOMP,
9875 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9876                 },
9877         },
9878         {
9879 @@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads[] = {
9880                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9881                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9882                                            OP_ALG_AAI_HMAC_PRECOMP,
9883 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9884                         .geniv = true,
9885                 },
9886         },
9887 @@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads[] = {
9888                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9889                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9890                                            OP_ALG_AAI_HMAC_PRECOMP,
9891 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9892                 }
9893         },
9894         {
9895 @@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads[] = {
9896                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9897                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9898                                            OP_ALG_AAI_HMAC_PRECOMP,
9899 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9900                         .geniv = true,
9901                 }
9902         },
9903 @@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads[] = {
9904                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9905                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9906                                            OP_ALG_AAI_HMAC_PRECOMP,
9907 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9908                 },
9909         },
9910         {
9911 @@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads[] = {
9912                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9913                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9914                                            OP_ALG_AAI_HMAC_PRECOMP,
9915 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9916                         .geniv = true,
9917                 },
9918         },
9919 @@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads[] = {
9920                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9921                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9922                                            OP_ALG_AAI_HMAC_PRECOMP,
9923 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9924                 },
9925         },
9926         {
9927 @@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads[] = {
9928                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9929                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9930                                            OP_ALG_AAI_HMAC_PRECOMP,
9931 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9932                         .geniv = true,
9933                 },
9934         },
9935 @@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads[] = {
9936                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9937                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9938                                            OP_ALG_AAI_HMAC_PRECOMP,
9939 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9940                 },
9941         },
9942         {
9943 @@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads[] = {
9944                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9945                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9946                                            OP_ALG_AAI_HMAC_PRECOMP,
9947 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9948                         .geniv = true,
9949                 },
9950         },
9951 @@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads[] = {
9952                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9953                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9954                                            OP_ALG_AAI_HMAC_PRECOMP,
9955 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9956                 },
9957         },
9958         {
9959 @@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads[] = {
9960                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9961                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9962                                            OP_ALG_AAI_HMAC_PRECOMP,
9963 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9964                         .geniv = true,
9965                 },
9966         },
9967 @@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads[] = {
9968                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9969                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9970                                            OP_ALG_AAI_HMAC_PRECOMP,
9971 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9972                 },
9973         },
9974         {
9975 @@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads[] = {
9976                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9977                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9978                                            OP_ALG_AAI_HMAC_PRECOMP,
9979 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9980                         .geniv = true,
9981                 },
9982         },
9983 @@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads[] = {
9984                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9985                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9986                                            OP_ALG_AAI_HMAC_PRECOMP,
9987 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9988                 },
9989         },
9990         {
9991 @@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads[] = {
9992                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9993                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9994                                            OP_ALG_AAI_HMAC_PRECOMP,
9995 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9996                         .geniv = true,
9997                 },
9998         },
9999 @@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads[] = {
10000                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10001                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10002                                            OP_ALG_AAI_HMAC_PRECOMP,
10003 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10004                 },
10005         },
10006         {
10007 @@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads[] = {
10008                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10009                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10010                                            OP_ALG_AAI_HMAC_PRECOMP,
10011 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10012                         .geniv = true,
10013                 },
10014         },
10015 @@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads[] = {
10016                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10017                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10018                                            OP_ALG_AAI_HMAC_PRECOMP,
10019 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10020                 },
10021         },
10022         {
10023 @@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads[] = {
10024                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10025                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10026                                            OP_ALG_AAI_HMAC_PRECOMP,
10027 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10028                         .geniv = true,
10029                 },
10030         },
10031 @@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads[] = {
10032                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10033                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10034                                            OP_ALG_AAI_HMAC_PRECOMP,
10035 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10036                 },
10037         },
10038         {
10039 @@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads[] = {
10040                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10041                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10042                                            OP_ALG_AAI_HMAC_PRECOMP,
10043 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10044                         .geniv = true,
10045                 },
10046         },
10047 @@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads[] = {
10048                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10049                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10050                                            OP_ALG_AAI_HMAC_PRECOMP,
10051 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10052                 },
10053         },
10054         {
10055 @@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads[] = {
10056                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10057                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10058                                            OP_ALG_AAI_HMAC_PRECOMP,
10059 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10060                         .geniv = true,
10061                 },
10062         },
10063 @@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads[] = {
10064                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10065                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10066                                            OP_ALG_AAI_HMAC_PRECOMP,
10067 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10068                 },
10069         },
10070         {
10071 @@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads[] = {
10072                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10073                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10074                                            OP_ALG_AAI_HMAC_PRECOMP,
10075 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10076                         .geniv = true,
10077                 },
10078         },
10079 @@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads[] = {
10080                                            OP_ALG_AAI_CTR_MOD128,
10081                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10082                                            OP_ALG_AAI_HMAC_PRECOMP,
10083 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10084                         .rfc3686 = true,
10085                 },
10086         },
10087 @@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads[] = {
10088                                            OP_ALG_AAI_CTR_MOD128,
10089                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10090                                            OP_ALG_AAI_HMAC_PRECOMP,
10091 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10092                         .rfc3686 = true,
10093                         .geniv = true,
10094                 },
10095 @@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads[] = {
10096                                            OP_ALG_AAI_CTR_MOD128,
10097                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10098                                            OP_ALG_AAI_HMAC_PRECOMP,
10099 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10100                         .rfc3686 = true,
10101                 },
10102         },
10103 @@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads[] = {
10104                                            OP_ALG_AAI_CTR_MOD128,
10105                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10106                                            OP_ALG_AAI_HMAC_PRECOMP,
10107 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10108                         .rfc3686 = true,
10109                         .geniv = true,
10110                 },
10111 @@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads[] = {
10112                                            OP_ALG_AAI_CTR_MOD128,
10113                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10114                                            OP_ALG_AAI_HMAC_PRECOMP,
10115 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10116                         .rfc3686 = true,
10117                 },
10118         },
10119 @@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads[] = {
10120                                            OP_ALG_AAI_CTR_MOD128,
10121                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10122                                            OP_ALG_AAI_HMAC_PRECOMP,
10123 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10124                         .rfc3686 = true,
10125                         .geniv = true,
10126                 },
10127 @@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads[] = {
10128                                            OP_ALG_AAI_CTR_MOD128,
10129                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10130                                            OP_ALG_AAI_HMAC_PRECOMP,
10131 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10132                         .rfc3686 = true,
10133                 },
10134         },
10135 @@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads[] = {
10136                                            OP_ALG_AAI_CTR_MOD128,
10137                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10138                                            OP_ALG_AAI_HMAC_PRECOMP,
10139 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10140                         .rfc3686 = true,
10141                         .geniv = true,
10142                 },
10143 @@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads[] = {
10144                                            OP_ALG_AAI_CTR_MOD128,
10145                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10146                                            OP_ALG_AAI_HMAC_PRECOMP,
10147 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10148                         .rfc3686 = true,
10149                 },
10150         },
10151 @@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads[] = {
10152                                            OP_ALG_AAI_CTR_MOD128,
10153                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10154                                            OP_ALG_AAI_HMAC_PRECOMP,
10155 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10156                         .rfc3686 = true,
10157                         .geniv = true,
10158                 },
10159 @@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads[] = {
10160                                            OP_ALG_AAI_CTR_MOD128,
10161                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10162                                            OP_ALG_AAI_HMAC_PRECOMP,
10163 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10164                         .rfc3686 = true,
10165                 },
10166         },
10167 @@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads[] = {
10168                                            OP_ALG_AAI_CTR_MOD128,
10169                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10170                                            OP_ALG_AAI_HMAC_PRECOMP,
10171 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10172                         .rfc3686 = true,
10173                         .geniv = true,
10174                 },
10175 @@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
10176  
10177  static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10178  {
10179 +       dma_addr_t dma_addr;
10180 +
10181         ctx->jrdev = caam_jr_alloc();
10182         if (IS_ERR(ctx->jrdev)) {
10183                 pr_err("Job Ring Device allocation for transform failed\n");
10184                 return PTR_ERR(ctx->jrdev);
10185         }
10186  
10187 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10188 +                                       offsetof(struct caam_ctx,
10189 +                                                sh_desc_enc_dma),
10190 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10191 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10192 +               dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10193 +               caam_jr_free(ctx->jrdev);
10194 +               return -ENOMEM;
10195 +       }
10196 +
10197 +       ctx->sh_desc_enc_dma = dma_addr;
10198 +       ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10199 +                                                  sh_desc_dec);
10200 +       ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10201 +                                                     sh_desc_givenc);
10202 +       ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10203 +
10204         /* copy descriptor header template value */
10205 -       ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10206 -       ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10207 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10208 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10209 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10210  
10211         return 0;
10212  }
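The single dma_map_single_attrs() above relies on struct caam_ctx keeping the three shared descriptors and the key contiguous, with the DMA handles placed immediately after them. A hedged sketch of that layout (field names as consumed by this hunk; array bounds illustrative, remaining members omitted):

struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];	/* mapped region starts here */
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;	/* offsetof(..., sh_desc_enc_dma) is */
	dma_addr_t sh_desc_dec_dma;	/* therefore the mapped size */
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	/* ... */
};

Each per-buffer handle is then dma_addr plus that buffer's offset within the struct, and the single dma_unmap_single_attrs() in caam_exit_common() below releases all four at once.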
10213 @@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
10214  
10215  static void caam_exit_common(struct caam_ctx *ctx)
10216  {
10217 -       if (ctx->sh_desc_enc_dma &&
10218 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10219 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10220 -                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10221 -       if (ctx->sh_desc_dec_dma &&
10222 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10223 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10224 -                                desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10225 -       if (ctx->sh_desc_givenc_dma &&
10226 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10227 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10228 -                                desc_bytes(ctx->sh_desc_givenc),
10229 -                                DMA_TO_DEVICE);
10230 -       if (ctx->key_dma &&
10231 -           !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10232 -               dma_unmap_single(ctx->jrdev, ctx->key_dma,
10233 -                                ctx->enckeylen + ctx->split_key_pad_len,
10234 -                                DMA_TO_DEVICE);
10235 -
10236 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10237 +                              offsetof(struct caam_ctx, sh_desc_enc_dma),
10238 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10239         caam_jr_free(ctx->jrdev);
10240  }
10241  
10242 @@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
10243  
10244         t_alg->caam.class1_alg_type = template->class1_alg_type;
10245         t_alg->caam.class2_alg_type = template->class2_alg_type;
10246 -       t_alg->caam.alg_op = template->alg_op;
10247  
10248         return t_alg;
10249  }
10250 diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
10251 new file mode 100644
10252 index 00000000..d162120a
10253 --- /dev/null
10254 +++ b/drivers/crypto/caam/caamalg_desc.c
10255 @@ -0,0 +1,1913 @@
10256 +/*
10257 + * Shared descriptors for aead, ablkcipher algorithms
10258 + *
10259 + * Copyright 2016 NXP
10260 + */
10261 +
10262 +#include "compat.h"
10263 +#include "desc_constr.h"
10264 +#include "caamalg_desc.h"
10265 +
10266 +/*
10267 + * For aead functions, the payload is read from req->src and the
10268 + * processed payload is written to req->dst
10269 + */
10270 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10271 +{
10272 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10273 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10274 +                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10275 +}
10276 +
10277 +/* Set DK bit in class 1 operation if shared */
10278 +static inline void append_dec_op1(u32 *desc, u32 type)
10279 +{
10280 +       u32 *jump_cmd, *uncond_jump_cmd;
10281 +
10282 +       /* DK bit is valid only for AES */
10283 +       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10284 +               append_operation(desc, type | OP_ALG_AS_INITFINAL |
10285 +                                OP_ALG_DECRYPT);
10286 +               return;
10287 +       }
10288 +
10289 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10290 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10291 +                        OP_ALG_DECRYPT);
10292 +       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10293 +       set_jump_tgt_here(desc, jump_cmd);
10294 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10295 +                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10296 +       set_jump_tgt_here(desc, uncond_jump_cmd);
10297 +}
10298 +
10299 +/**
10300 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10301 + *                               (non-protocol) with no (null) encryption.
10302 + * @desc: pointer to buffer used for descriptor construction
10303 + * @adata: pointer to authentication transform definitions. Note that since a
10304 + *         split key is to be used, the size of the split key itself is
10305 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10306 + *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10307 + * @icvsize: integrity check value (ICV) size (truncated or full)
10308 + *
10309 + * Note: Requires an MDHA split key.
10310 + */
10311 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10312 +                                unsigned int icvsize)
10313 +{
10314 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10315 +
10316 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10317 +
10318 +       /* Skip if already shared */
10319 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10320 +                                  JUMP_COND_SHRD);
10321 +       if (adata->key_inline)
10322 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10323 +                                 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10324 +                                 KEY_ENC);
10325 +       else
10326 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10327 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10328 +       set_jump_tgt_here(desc, key_jump_cmd);
10329 +
10330 +       /* assoclen + cryptlen = seqinlen */
10331 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10332 +
10333 +       /* Prepare to read and write cryptlen + assoclen bytes */
10334 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10335 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10336 +
10337 +       /*
10338 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10339 +        * thus we need to do some magic, i.e. self-patch the descriptor
10340 +        * buffer.
10341 +        */
10342 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10343 +                                   MOVE_DEST_MATH3 |
10344 +                                   (0x6 << MOVE_LEN_SHIFT));
10345 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10346 +                                    MOVE_DEST_DESCBUF |
10347 +                                    MOVE_WAITCOMP |
10348 +                                    (0x8 << MOVE_LEN_SHIFT));
10349 +
10350 +       /* Class 2 operation */
10351 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10352 +                        OP_ALG_ENCRYPT);
10353 +
10354 +       /* Read and write cryptlen bytes */
10355 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10356 +
10357 +       set_move_tgt_here(desc, read_move_cmd);
10358 +       set_move_tgt_here(desc, write_move_cmd);
10359 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10360 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10361 +                   MOVE_AUX_LS);
10362 +
10363 +       /* Write ICV */
10364 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10365 +                        LDST_SRCDST_BYTE_CONTEXT);
10366 +
10367 +#ifdef DEBUG
10368 +       print_hex_dump(KERN_ERR,
10369 +                      "aead null enc shdesc@" __stringify(__LINE__)": ",
10370 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10371 +#endif
10372 +}
10373 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10374 +
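A minimal caller sketch for the constructor above (not part of the patch; split_key_len, split_key_pad_len and key_buf are placeholder names for a precomputed MDHA split key), building an hmac(sha256) null-cipher encap descriptor with the key inlined:

u32 desc[MAX_CAAM_DESCSIZE];
struct alginfo adata = {
	.algtype    = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
	.keylen     = split_key_len,		/* split key length */
	.keylen_pad = split_key_pad_len,	/* padded split key length */
	.key_virt   = key_buf,			/* CPU address of split key */
	.key_inline = true,			/* embed key in descriptor */
};

cnstr_shdsc_aead_null_encap(desc, &adata, SHA256_DIGEST_SIZE);
/* desc[] is now ready to be DMA-mapped and referenced by job descriptors */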
10375 +/**
10376 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10377 + *                               (non-protocol) with no (null) decryption.
10378 + * @desc: pointer to buffer used for descriptor construction
10379 + * @adata: pointer to authentication transform definitions. Note that since a
10380 + *         split key is to be used, the size of the split key itself is
10381 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10382 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10383 + * @icvsize: integrity check value (ICV) size (truncated or full)
10384 + *
10385 + * Note: Requires an MDHA split key.
10386 + */
10387 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10388 +                                unsigned int icvsize)
10389 +{
10390 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10391 +
10392 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10393 +
10394 +       /* Skip if already shared */
10395 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10396 +                                  JUMP_COND_SHRD);
10397 +       if (adata->key_inline)
10398 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10399 +                                 adata->keylen, CLASS_2 |
10400 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
10401 +       else
10402 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10403 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10404 +       set_jump_tgt_here(desc, key_jump_cmd);
10405 +
10406 +       /* Class 2 operation */
10407 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10408 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10409 +
10410 +       /* assoclen + cryptlen = seqoutlen */
10411 +       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10412 +
10413 +       /* Prepare to read and write cryptlen + assoclen bytes */
10414 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10415 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10416 +
10417 +       /*
10418 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10419 +        * thus we need to do some magic, i.e. self-patch the descriptor
10420 +        * buffer.
10421 +        */
10422 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10423 +                                   MOVE_DEST_MATH2 |
10424 +                                   (0x6 << MOVE_LEN_SHIFT));
10425 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10426 +                                    MOVE_DEST_DESCBUF |
10427 +                                    MOVE_WAITCOMP |
10428 +                                    (0x8 << MOVE_LEN_SHIFT));
10429 +
10430 +       /* Read and write cryptlen bytes */
10431 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10432 +
10433 +       /*
10434 +        * Insert a NOP here, since we need at least 4 instructions between
10435 +        * code patching the descriptor buffer and the location being patched.
10436 +        */
10437 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10438 +       set_jump_tgt_here(desc, jump_cmd);
10439 +
10440 +       set_move_tgt_here(desc, read_move_cmd);
10441 +       set_move_tgt_here(desc, write_move_cmd);
10442 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10443 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10444 +                   MOVE_AUX_LS);
10445 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10446 +
10447 +       /* Load ICV */
10448 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10449 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10450 +
10451 +#ifdef DEBUG
10452 +       print_hex_dump(KERN_ERR,
10453 +                      "aead null dec shdesc@" __stringify(__LINE__)": ",
10454 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10455 +#endif
10456 +}
10457 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10458 +
10459 +static void init_sh_desc_key_aead(u32 * const desc,
10460 +                                 struct alginfo * const cdata,
10461 +                                 struct alginfo * const adata,
10462 +                                 const bool is_rfc3686, u32 *nonce)
10463 +{
10464 +       u32 *key_jump_cmd;
10465 +       unsigned int enckeylen = cdata->keylen;
10466 +
10467 +       /* Note: Context registers are saved. */
10468 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10469 +
10470 +       /* Skip if already shared */
10471 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10472 +                                  JUMP_COND_SHRD);
10473 +
10474 +       /*
10475 +        * RFC3686 specific:
10476 +        *      | key = {AUTH_KEY, ENC_KEY, NONCE}
10477 +        *      | enckeylen = encryption key size + nonce size
10478 +        */
10479 +       if (is_rfc3686)
10480 +               enckeylen -= CTR_RFC3686_NONCE_SIZE;
10481 +
10482 +       if (adata->key_inline)
10483 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10484 +                                 adata->keylen, CLASS_2 |
10485 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
10486 +       else
10487 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10488 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10489 +
10490 +       if (cdata->key_inline)
10491 +               append_key_as_imm(desc, cdata->key_virt, enckeylen,
10492 +                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10493 +       else
10494 +               append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10495 +                          KEY_DEST_CLASS_REG);
10496 +
10497 +       /* Load Counter into CONTEXT1 reg */
10498 +       if (is_rfc3686) {
10499 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10500 +                                  LDST_CLASS_IND_CCB |
10501 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10502 +               append_move(desc,
10503 +                           MOVE_SRC_OUTFIFO |
10504 +                           MOVE_DEST_CLASS1CTX |
10505 +                           (16 << MOVE_OFFSET_SHIFT) |
10506 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10507 +       }
10508 +
10509 +       set_jump_tgt_here(desc, key_jump_cmd);
10510 +}
10511 +
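The rfc3686 handling above assumes the nonce is carried at the tail of the cipher key blob. A hedged helper sketch (hypothetical name; assumes the key is CPU-visible through key_virt) for deriving the nonce pointer these constructors take:

static inline const u32 *rfc3686_nonce(const struct alginfo *cdata)
{
	/* key = {ENC_KEY, NONCE}: the nonce is the last
	 * CTR_RFC3686_NONCE_SIZE bytes of the blob */
	return (const u32 *)((const u8 *)cdata->key_virt +
			     cdata->keylen - CTR_RFC3686_NONCE_SIZE);
}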
10512 +/**
10513 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10514 + *                          (non-protocol).
10515 + * @desc: pointer to buffer used for descriptor construction
10516 + * @cdata: pointer to block cipher transform definitions
10517 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10518 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10519 + * @adata: pointer to authentication transform definitions. Note that since a
10520 + *         split key is to be used, the size of the split key itself is
10521 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10522 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10523 + * @ivsize: initialization vector size
10524 + * @icvsize: integrity check value (ICV) size (truncated or full)
10525 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10526 + * @nonce: pointer to rfc3686 nonce
10527 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10528 + * @is_qi: true when called from caam/qi
10529 + *
10530 + * Note: Requires an MDHA split key.
10531 + */
10532 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10533 +                           struct alginfo *adata, unsigned int ivsize,
10534 +                           unsigned int icvsize, const bool is_rfc3686,
10535 +                           u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
10536 +{
10537 +       /* Note: Context registers are saved. */
10538 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10539 +
10540 +       /* Class 2 operation */
10541 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10542 +                        OP_ALG_ENCRYPT);
10543 +
10544 +       if (is_qi) {
10545 +               u32 *wait_load_cmd;
10546 +
10547 +               /* REG3 = assoclen */
10548 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10549 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10550 +                               (4 << LDST_OFFSET_SHIFT));
10551 +
10552 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10553 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10554 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10555 +                                           JUMP_COND_NIFP);
10556 +               set_jump_tgt_here(desc, wait_load_cmd);
10557 +
10558 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10559 +                               LDST_SRCDST_BYTE_CONTEXT |
10560 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10561 +       }
10562 +
10563 +       /* Read and write assoclen bytes */
10564 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10565 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10566 +
10567 +       /* Skip assoc data */
10568 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10569 +
10570 +       /* read assoc before reading payload */
10571 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10572 +                                     FIFOLDST_VLF);
10573 +
10574 +       /* Load Counter into CONTEXT1 reg */
10575 +       if (is_rfc3686)
10576 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10577 +                                    LDST_SRCDST_BYTE_CONTEXT |
10578 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10579 +                                     LDST_OFFSET_SHIFT));
10580 +
10581 +       /* Class 1 operation */
10582 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10583 +                        OP_ALG_ENCRYPT);
10584 +
10585 +       /* Read and write cryptlen bytes */
10586 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10587 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10588 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10589 +
10590 +       /* Write ICV */
10591 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10592 +                        LDST_SRCDST_BYTE_CONTEXT);
10593 +
10594 +#ifdef DEBUG
10595 +       print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10596 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10597 +#endif
10598 +}
10599 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10600 +
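A hedged parameter sketch for a plain cbc(aes)+hmac(sha1) encap descriptor (enckeylen, enckey_dma and the split_key_* names are placeholders; both keys referenced by pointer rather than inlined):

u32 desc[MAX_CAAM_DESCSIZE];
struct alginfo cdata = {
	.algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	.keylen     = enckeylen,
	.key_dma    = enckey_dma,
	.key_inline = false,
};
struct alginfo adata = {
	.algtype    = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
	.keylen     = split_key_len,
	.keylen_pad = split_key_pad_len,
	.key_dma    = split_key_dma,
	.key_inline = false,
};

cnstr_shdsc_aead_encap(desc, &cdata, &adata,
		       AES_BLOCK_SIZE,		/* ivsize for cbc(aes) */
		       SHA1_DIGEST_SIZE,	/* ICV size */
		       false, NULL,		/* no rfc3686 wrap, no nonce */
		       0,			/* IV at CONTEXT1 offset 0 */
		       false);			/* job ring, not caam/qi */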
10601 +/**
10602 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10603 + *                          (non-protocol).
10604 + * @desc: pointer to buffer used for descriptor construction
10605 + * @cdata: pointer to block cipher transform definitions
10606 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10607 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10608 + * @adata: pointer to authentication transform definitions. Note that since a
10609 + *         split key is to be used, the size of the split key itself is
10610 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10611 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10612 + * @ivsize: initialization vector size
10613 + * @icvsize: integrity check value (ICV) size (truncated or full)
10614 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10615 + * @nonce: pointer to rfc3686 nonce
10616 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10617 + * @is_qi: true when called from caam/qi
10618 + *
10619 + * Note: Requires an MDHA split key.
10620 + */
10621 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10622 +                           struct alginfo *adata, unsigned int ivsize,
10623 +                           unsigned int icvsize, const bool geniv,
10624 +                           const bool is_rfc3686, u32 *nonce,
10625 +                           const u32 ctx1_iv_off, const bool is_qi)
10626 +{
10627 +       /* Note: Context registers are saved. */
10628 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10629 +
10630 +       /* Class 2 operation */
10631 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10632 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10633 +
10634 +       if (is_qi) {
10635 +               u32 *wait_load_cmd;
10636 +
10637 +               /* REG3 = assoclen */
10638 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10639 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10640 +                               (4 << LDST_OFFSET_SHIFT));
10641 +
10642 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10643 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10644 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10645 +                                           JUMP_COND_NIFP);
10646 +               set_jump_tgt_here(desc, wait_load_cmd);
10647 +
10648 +               if (!geniv)
10649 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10650 +                                       LDST_SRCDST_BYTE_CONTEXT |
10651 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10652 +       }
10653 +
10654 +       /* Read and write assoclen bytes */
10655 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10656 +       if (geniv)
10657 +               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
10658 +       else
10659 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10660 +
10661 +       /* Skip assoc data */
10662 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10663 +
10664 +       /* read assoc before reading payload */
10665 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10666 +                            KEY_VLF);
10667 +
10668 +       if (geniv) {
10669 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10670 +                               LDST_SRCDST_BYTE_CONTEXT |
10671 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10672 +               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10673 +                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10674 +       }
10675 +
10676 +       /* Load Counter into CONTEXT1 reg */
10677 +       if (is_rfc3686)
10678 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10679 +                                    LDST_SRCDST_BYTE_CONTEXT |
10680 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10681 +                                     LDST_OFFSET_SHIFT));
10682 +
10683 +       /* Choose operation */
10684 +       if (ctx1_iv_off)
10685 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10686 +                                OP_ALG_DECRYPT);
10687 +       else
10688 +               append_dec_op1(desc, cdata->algtype);
10689 +
10690 +       /* Read and write cryptlen bytes */
10691 +       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10692 +       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10693 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10694 +
10695 +       /* Load ICV */
10696 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10697 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10698 +
10699 +#ifdef DEBUG
10700 +       print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10701 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10702 +#endif
10703 +}
10704 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10705 +
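Relative to encap, decap takes one extra geniv flag; a hedged call sketch (ctx, alg and the local variables are illustrative), where geniv mirrors the .geniv template flag and tells the descriptor that the IV travels with the input sequence:

cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
		       ivsize, ctx->authsize,
		       alg->caam.geniv, is_rfc3686, nonce,
		       ctx1_iv_off, false /* job ring, not caam/qi */);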
10706 +/**
10707 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10708 + *                             (non-protocol) with HW-generated initialization
10709 + *                             vector.
10710 + * @desc: pointer to buffer used for descriptor construction
10711 + * @cdata: pointer to block cipher transform definitions
10712 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10713 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10714 + * @adata: pointer to authentication transform definitions. Note that since a
10715 + *         split key is to be used, the size of the split key itself is
10716 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10717 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10718 + * @ivsize: initialization vector size
10719 + * @icvsize: integrity check value (ICV) size (truncated or full)
10720 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10721 + * @nonce: pointer to rfc3686 nonce
10722 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10723 + * @is_qi: true when called from caam/qi
10724 + *
10725 + * Note: Requires an MDHA split key.
10726 + */
10727 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10728 +                              struct alginfo *adata, unsigned int ivsize,
10729 +                              unsigned int icvsize, const bool is_rfc3686,
10730 +                              u32 *nonce, const u32 ctx1_iv_off,
10731 +                              const bool is_qi)
10732 +{
10733 +       u32 geniv, moveiv;
10734 +
10735 +       /* Note: Context registers are saved. */
10736 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10737 +
10738 +       if (is_qi) {
10739 +               u32 *wait_load_cmd;
10740 +
10741 +               /* REG3 = assoclen */
10742 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10743 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10744 +                               (4 << LDST_OFFSET_SHIFT));
10745 +
10746 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10747 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10748 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10749 +                                           JUMP_COND_NIFP);
10750 +               set_jump_tgt_here(desc, wait_load_cmd);
10751 +       }
10752 +
10753 +       if (is_rfc3686) {
10754 +               if (is_qi)
10755 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10756 +                                       LDST_SRCDST_BYTE_CONTEXT |
10757 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10758 +
10759 +               goto copy_iv;
10760 +       }
10761 +
10762 +       /* Generate IV */
10763 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10764 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10765 +               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10766 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10767 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10768 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10769 +       append_move(desc, MOVE_WAITCOMP |
10770 +                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10771 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10772 +                   (ivsize << MOVE_LEN_SHIFT));
10773 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10774 +
10775 +copy_iv:
10776 +       /* Copy IV from class 1 context to the output FIFO */
10777 +       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10778 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10779 +                   (ivsize << MOVE_LEN_SHIFT));
10780 +
10781 +       /* Return to encryption */
10782 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10783 +                        OP_ALG_ENCRYPT);
10784 +
10785 +       /* Read and write assoclen bytes */
10786 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10787 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10788 +
10789 +       /* Skip assoc data */
10790 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10791 +
10792 +       /* read assoc before reading payload */
10793 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10794 +                            KEY_VLF);
10795 +
10796 +       /* Copy iv from outfifo to class 2 fifo */
10797 +       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10798 +                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10799 +       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10800 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10801 +       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10802 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10803 +
10804 +       /* Load Counter into CONTEXT1 reg */
10805 +       if (is_rfc3686)
10806 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10807 +                                    LDST_SRCDST_BYTE_CONTEXT |
10808 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10809 +                                     LDST_OFFSET_SHIFT));
10810 +
10811 +       /* Class 1 operation */
10812 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10813 +                        OP_ALG_ENCRYPT);
10814 +
10815 +       /* Will write ivsize + cryptlen */
10816 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10817 +
10818 +       /* No need to reload iv */
10819 +       append_seq_fifo_load(desc, ivsize,
10820 +                            FIFOLD_CLASS_SKIP);
10821 +
10822 +       /* Will read cryptlen */
10823 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10824 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10825 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10826 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10827 +
10828 +       /* Write ICV */
10829 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10830 +                        LDST_SRCDST_BYTE_CONTEXT);
10831 +
10832 +#ifdef DEBUG
10833 +       print_hex_dump(KERN_ERR,
10834 +                      "aead givenc shdesc@" __stringify(__LINE__)": ",
10835 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10836 +#endif
10837 +}
10838 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10839 +
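Templates in driver_aeads[] earlier in this patch that set .geniv = true end up on this constructor for the encrypt direction; a hedged dispatch sketch (ctx/alg names illustrative):

if (alg->caam.geniv)
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686,
				  nonce, ctx1_iv_off, false);
else
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686,
			       nonce, ctx1_iv_off, false);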
10840 +/**
10841 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10842 + * @desc: pointer to buffer used for descriptor construction
10843 + * @cdata: pointer to block cipher transform definitions
10844 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ORed
10845 + *         with OP_ALG_AAI_CBC
10846 + * @adata: pointer to authentication transform definitions. Note that since a
10847 + *         split key is to be used, the size of the split key itself is
10848 + *         specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ORed with
10849 + *         OP_ALG_AAI_HMAC_PRECOMP.
10850 + * @assoclen: associated data length
10851 + * @ivsize: initialization vector size
10852 + * @authsize: authentication data size
10853 + * @blocksize: block cipher size
10854 + */
10855 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10856 +                          struct alginfo *adata, unsigned int assoclen,
10857 +                          unsigned int ivsize, unsigned int authsize,
10858 +                          unsigned int blocksize)
10859 +{
10860 +       u32 *key_jump_cmd, *zero_payload_jump_cmd;
10861 +       u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10862 +
10863 +       /*
10864 +        * Compute the index (in bytes) for the LOAD with destination of
10865 +        * Class 1 Data Size Register and for the LOAD that generates padding
10866 +        */
10867 +       if (adata->key_inline) {
10868 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10869 +                               cdata->keylen - 4 * CAAM_CMD_SZ;
10870 +               idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10871 +                            cdata->keylen - 2 * CAAM_CMD_SZ;
10872 +       } else {
10873 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10874 +                               4 * CAAM_CMD_SZ;
10875 +               idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10876 +                            2 * CAAM_CMD_SZ;
10877 +       }
10878 +
10879 +       stidx = 1 << HDR_START_IDX_SHIFT;
10880 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10881 +
10882 +       /* skip key loading if keys were already loaded due to sharing */
10883 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10884 +                                  JUMP_COND_SHRD);
10885 +
10886 +       if (adata->key_inline)
10887 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10888 +                                 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10889 +                                 KEY_ENC);
10890 +       else
10891 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10892 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10893 +
10894 +       if (cdata->key_inline)
10895 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
10896 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
10897 +       else
10898 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10899 +                          KEY_DEST_CLASS_REG);
10900 +
10901 +       set_jump_tgt_here(desc, key_jump_cmd);
10902 +
10903 +       /* class 2 operation */
10904 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10905 +                        OP_ALG_ENCRYPT);
10906 +       /* class 1 operation */
10907 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10908 +                        OP_ALG_ENCRYPT);
10909 +
10910 +       /* payloadlen = input data length - (assoclen + ivlen) */
10911 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
10912 +
10913 +       /* math1 = payloadlen + icvlen */
10914 +       append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
10915 +
10916 +       /* padlen = block_size - math1 % block_size */
10917 +       append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
10918 +       append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
10919 +
10920 +       /* cryptlen = payloadlen + icvlen + padlen */
10921 +       append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
10922 +
10923 +       /*
10924 +        * update immediate data with the padding length value
10925 +        * for the LOAD in the class 1 data size register.
10926 +        */
10927 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10928 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
10929 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10930 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
10931 +
10932 +       /* overwrite PL field for the padding info FIFO entry */
10933 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10934 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
10935 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10936 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
10937 +
10938 +       /* store encrypted payload, icv and padding */
10939 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
10940 +
10941 +       /* if payload length is zero, jump to zero-payload commands */
10942 +       append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
10943 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
10944 +                                           JUMP_COND_MATH_Z);
10945 +
10946 +       /* load iv in context1 */
10947 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10948 +                  LDST_CLASS_1_CCB | ivsize);
10949 +
10950 +       /* read assoc for authentication */
10951 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10952 +                            FIFOLD_TYPE_MSG);
10953 +       /* read payload via input snoop (feeds both class 1 and class 2) */
10954 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
10955 +                            FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
10956 +
10957 +       /* skip over the zero-payload commands */
10958 +       append_jump(desc, JUMP_TEST_ALL | 3);
10959 +
10960 +       /* zero-payload commands */
10961 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
10962 +
10963 +       /* load iv in context1 */
10964 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10965 +                  LDST_CLASS_1_CCB | ivsize);
10966 +
10967 +       /* assoc data is the only data for authentication */
10968 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10969 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
10970 +
10971 +       /* send icv to encryption */
10972 +       append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
10973 +                   authsize);
10974 +
10975 +       /* update class 1 data size register with padding length */
10976 +       append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
10977 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10978 +
10979 +       /* generate padding and send it to encryption */
10980 +       genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
10981 +             NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
10982 +       append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
10983 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10984 +
10985 +#ifdef DEBUG
10986 +       print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
10987 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
10988 +                      desc_bytes(desc), 1);
10989 +#endif
10990 +}
10991 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
10992 +
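A small C model (not part of the patch) of the MATH-register arithmetic above; it assumes blocksize is a power of two, which is what makes the AND-based modulo valid:

static unsigned int tls10_cryptlen(unsigned int payloadlen,
				   unsigned int authsize,
				   unsigned int blocksize)
{
	unsigned int m1 = payloadlen + authsize;		  /* REG1 */
	unsigned int padlen = blocksize - (m1 & (blocksize - 1)); /* REG2 */

	/* when m1 is already block-aligned a full padding block is
	 * emitted, matching TLS 1.0, which always pads by at least
	 * one byte */
	return m1 + padlen;	/* VARSEQOUTLEN: payload + ICV + pad */
}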
10993 +/**
10994 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
10995 + * @desc: pointer to buffer used for descriptor construction
10996 + * @cdata: pointer to block cipher transform definitions
10997 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ORed
10998 + *         with OP_ALG_AAI_CBC
10999 + * @adata: pointer to authentication transform definitions. Note that since a
11000 + *         split key is to be used, the size of the split key itself is
11001 + *         specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ORed with
11002 + *         OP_ALG_AAI_HMAC_PRECOMP.
11003 + * @assoclen: associated data length
11004 + * @ivsize: initialization vector size
11005 + * @authsize: authentication data size
11006 + * @blocksize: block cipher size
11007 + */
11008 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11009 +                          struct alginfo *adata, unsigned int assoclen,
11010 +                          unsigned int ivsize, unsigned int authsize,
11011 +                          unsigned int blocksize)
11012 +{
11013 +       u32 stidx, jumpback;
11014 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11015 +       /*
11016 +        * Pointer Size bool determines the size of address pointers.
11017 +        * false - Pointers fit in one 32-bit word.
11018 +        * true - Pointers fit in two 32-bit words.
11019 +        */
11020 +       static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11021 +
11022 +       stidx = 1 << HDR_START_IDX_SHIFT;
11023 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11024 +
11025 +       /* skip key loading if keys were already loaded due to sharing */
11026 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11027 +                                  JUMP_COND_SHRD);
11028 +
11029 +       append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11030 +                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
11031 +
11032 +       append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11033 +                  KEY_DEST_CLASS_REG);
11034 +
11035 +       set_jump_tgt_here(desc, key_jump_cmd);
11036 +
11037 +       /* class 2 operation */
11038 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11039 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11040 +       /* class 1 operation */
11041 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11042 +                        OP_ALG_DECRYPT);
11043 +
11044 +       /* VSIL = input data length - 2 * block_size */
11045 +       append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11046 +                               blocksize);
11047 +
11048 +       /*
11049 +        * payloadlen + icvlen + padlen = input data length - (assoclen +
11050 +        * ivsize)
11051 +        */
11052 +       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11053 +
11054 +       /* skip data to the last but one cipher block */
11055 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11056 +
11057 +       /* load iv for the last cipher block */
11058 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11059 +                  LDST_CLASS_1_CCB | ivsize);
11060 +
11061 +       /* read last cipher block */
11062 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11063 +                            FIFOLD_TYPE_LAST1 | blocksize);
11064 +
11065 +       /* move decrypted block into math0 and math1 */
11066 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11067 +                   blocksize);
11068 +
11069 +       /* reset AES CHA */
11070 +       append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11071 +                           LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11072 +
11073 +       /* rewind input sequence */
11074 +       append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11075 +
11076 +       /* key1 is in decryption form */
11077 +       append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11078 +                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11079 +
11080 +       /* load iv in context1 */
11081 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11082 +                  LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11083 +
11084 +       /* read sequence number */
11085 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11086 +       /* load Type, Version and Len fields into math0 */
11087 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11088 +                  LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11089 +
11090 +       /* compute (padlen - 1) */
11091 +       append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11092 +
11093 +       /* math2 = icvlen + (padlen - 1) + 1 */
11094 +       append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11095 +
11096 +       append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11097 +
11098 +       /* VSOL = payloadlen + icvlen + padlen */
11099 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11100 +
11101 +#ifdef __LITTLE_ENDIAN
11102 +       append_moveb(desc, MOVE_WAITCOMP |
11103 +                    MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11104 +#endif
11105 +       /* update Len field */
11106 +       append_math_sub(desc, REG0, REG0, REG2, 8);
11107 +
11108 +       /* store decrypted payload, icv and padding */
11109 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11110 +
11111 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11112 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11113 +
11114 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11115 +                                           JUMP_COND_MATH_Z);
11116 +
11117 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
11118 +       append_move(desc, MOVE_WAITCOMP |
11119 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11120 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11121 +
11122 +       /* out-snoop payload: decrypted data also goes to class 2 auth */
11123 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11124 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11125 +                            FIFOLDST_VLF);
11126 +       skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11127 +
11128 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11129 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
11130 +       append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11131 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11132 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11133 +
11134 +       set_jump_tgt_here(desc, skip_zero_jump_cmd);
11135 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11136 +
11137 +       /* load icvlen and padlen */
11138 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11139 +                            FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11140 +
11141 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11142 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11143 +
11144 +       /*
11145 +        * Start a new input sequence using the SEQ OUT PTR command options,
11146 +        * pointer and length used when the current output sequence was defined.
11147 +        */
11148 +       if (ps) {
11149 +               /*
11150 +                * Move the lower 32 bits of Shared Descriptor address, the
11151 +                * SEQ OUT PTR command, Output Pointer (2 words) and
11152 +                * Output Length into math registers.
11153 +                */
11154 +#ifdef __LITTLE_ENDIAN
11155 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11156 +                           MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11157 +                           20);
11158 +#else
11159 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11160 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11161 +                           20);
11162 +#endif
11163 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
11164 +               append_math_and_imm_u32(desc, REG0, REG0, IMM,
11165 +                                       ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11166 +               /* Append a JUMP command after the copied fields */
11167 +               jumpback = CMD_JUMP | (char)-9;
11168 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11169 +                                   LDST_SRCDST_WORD_DECO_MATH2 |
11170 +                                   (4 << LDST_OFFSET_SHIFT));
11171 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11172 +               /* Move the updated fields back to the Job Descriptor */
11173 +#ifdef __LITTLE_ENDIAN
11174 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11175 +                           MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11176 +                           24);
11177 +#else
11178 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11179 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11180 +                           24);
11181 +#endif
11182 +               /*
11183 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11184 +                * and then jump back to the next command from the
11185 +                * Shared Descriptor.
11186 +                */
11187 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11188 +       } else {
11189 +               /*
11190 +                * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11191 +                * Output Length into math registers.
11192 +                */
11193 +#ifdef __LITTLE_ENDIAN
11194 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11195 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11196 +                           12);
11197 +#else
11198 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11199 +                           MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11200 +                           12);
11201 +#endif
11202 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
11203 +               append_math_and_imm_u64(desc, REG0, REG0, IMM,
11204 +                                       ~(((u64)(CMD_SEQ_IN_PTR ^
11205 +                                                CMD_SEQ_OUT_PTR)) << 32));
11206 +               /* Append a JUMP command after the copied fields */
11207 +               jumpback = CMD_JUMP | (char)-7;
11208 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11209 +                                   LDST_SRCDST_WORD_DECO_MATH1 |
11210 +                                   (4 << LDST_OFFSET_SHIFT));
11211 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11212 +               /* Move the updated fields back to the Job Descriptor */
11213 +#ifdef __LITTLE_ENDIAN
11214 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11215 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11216 +                           16);
11217 +#else
11218 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11219 +                           MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11220 +                           16);
11221 +#endif
11222 +               /*
11223 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11224 +                * and then jump back to the next command from the
11225 +                * Shared Descriptor.
11226 +                */
11227 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11228 +       }
11229 +
11230 +       /* skip payload */
11231 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11232 +       /* check icv */
11233 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11234 +                            FIFOLD_TYPE_LAST2 | authsize);
11235 +
11236 +#ifdef DEBUG
11237 +       print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11238 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
11239 +                      desc_bytes(desc), 1);
11240 +#endif
11241 +}
11242 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
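
The register arithmetic above is easier to follow outside descriptor syntax.
A standalone C sketch (variable names are local to this illustration, not
CAAM API) of how decap recovers the payload length from the last decrypted
block:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t math1 = 0x0405040504050405;  /* last decrypted block */
            unsigned int authsize = 20;           /* HMAC-SHA1 ICV */
            unsigned int reg3 = 48;          /* payloadlen + icvlen + padlen */
            unsigned int pad_m1, reg2, payloadlen;

            pad_m1 = math1 & 255;                 /* last byte = padlen - 1 */
            reg2 = pad_m1 + authsize + 1;         /* icvlen + padlen */
            payloadlen = reg3 - reg2;

            printf("padlen=%u icv+pad=%u payload=%u\n",
                   pad_m1 + 1, reg2, payloadlen);
            return 0;
    }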
11243 +
11244 +/**
11245 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11246 + * @desc: pointer to buffer used for descriptor construction
11247 + * @cdata: pointer to block cipher transform definitions
11248 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11249 + * @ivsize: initialization vector size
11250 + * @icvsize: integrity check value (ICV) size (truncated or full)
11251 + * @is_qi: true when called from caam/qi
11252 + */
11253 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11254 +                          unsigned int ivsize, unsigned int icvsize,
11255 +                          const bool is_qi)
11256 +{
11257 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11258 +           *zero_assoc_jump_cmd2;
11259 +
11260 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11261 +
11262 +       /* skip key loading if keys were already loaded due to sharing */
11263 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11264 +                                  JUMP_COND_SHRD);
11265 +       if (cdata->key_inline)
11266 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11267 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11268 +       else
11269 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11270 +                          KEY_DEST_CLASS_REG);
11271 +       set_jump_tgt_here(desc, key_jump_cmd);
11272 +
11273 +       /* class 1 operation */
11274 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11275 +                        OP_ALG_ENCRYPT);
11276 +
11277 +       if (is_qi) {
11278 +               u32 *wait_load_cmd;
11279 +
11280 +               /* REG3 = assoclen */
11281 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11282 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11283 +                               (4 << LDST_OFFSET_SHIFT));
11284 +
11285 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11286 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11287 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11288 +                                           JUMP_COND_NIFP);
11289 +               set_jump_tgt_here(desc, wait_load_cmd);
11290 +
11291 +               append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11292 +                                       ivsize);
11293 +       } else {
11294 +               append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11295 +                               CAAM_CMD_SZ);
11296 +       }
11297 +
11298 +       /* if assoclen + cryptlen is ZERO, skip to ICV write */
11299 +       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11300 +                                                JUMP_COND_MATH_Z);
11301 +
11302 +       if (is_qi)
11303 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11304 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11305 +
11306 +       /* if assoclen is ZERO, skip reading the assoc data */
11307 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11308 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11309 +                                          JUMP_COND_MATH_Z);
11310 +
11311 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11312 +
11313 +       /* skip assoc data */
11314 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11315 +
11316 +       /* cryptlen = seqinlen - assoclen */
11317 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11318 +
11319 +       /* if cryptlen is ZERO jump to zero-payload commands */
11320 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11321 +                                           JUMP_COND_MATH_Z);
11322 +
11323 +       /* read assoc data */
11324 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11325 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11326 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11327 +
11328 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11329 +
11330 +       /* write encrypted data */
11331 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11332 +
11333 +       /* read payload data */
11334 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11335 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11336 +
11337 +       /* jump to ICV writing */
11338 +       if (is_qi)
11339 +               append_jump(desc, JUMP_TEST_ALL | 4);
11340 +       else
11341 +               append_jump(desc, JUMP_TEST_ALL | 2);
11342 +
11343 +       /* zero-payload commands */
11344 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11345 +
11346 +       /* read assoc data */
11347 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11348 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11349 +       if (is_qi)
11350 +               /* jump to ICV writing */
11351 +               append_jump(desc, JUMP_TEST_ALL | 2);
11352 +
11353 +       /* There is no input data */
11354 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11355 +
11356 +       if (is_qi)
11357 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11358 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11359 +                                    FIFOLD_TYPE_LAST1);
11360 +
11361 +       /* write ICV */
11362 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11363 +                        LDST_SRCDST_BYTE_CONTEXT);
11364 +
11365 +#ifdef DEBUG
11366 +       print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11367 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11368 +#endif
11369 +}
11370 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
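
A hedged usage sketch (not part of the patch) of how a setkey path might
invoke this constructor; "my_gcm_ctx" and its fields are hypothetical driver
state, DMA mapping and error handling are elided, and the buffer behind
sh_desc_enc is assumed to hold at least DESC_QI_GCM_ENC_LEN bytes:

    static void my_gcm_build_enc_desc(struct my_gcm_ctx *ctx)
    {
            struct alginfo cdata = {
                    .algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                    .key_virt   = ctx->key,
                    .keylen     = ctx->keylen,
                    .key_inline = true,
            };

            /* 12-byte IV, full 16-byte ICV, built for the caam/qi backend */
            cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &cdata, 12, 16, true);
    }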
11371 +
11372 +/**
11373 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11374 + * @desc: pointer to buffer used for descriptor construction
11375 + * @cdata: pointer to block cipher transform definitions
11376 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11377 + * @ivsize: initialization vector size
11378 + * @icvsize: integrity check value (ICV) size (truncated or full)
11379 + * @is_qi: true when called from caam/qi
11380 + */
11381 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11382 +                          unsigned int ivsize, unsigned int icvsize,
11383 +                          const bool is_qi)
11384 +{
11385 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11386 +
11387 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11388 +
11389 +       /* skip key loading if keys were already loaded due to sharing */
11390 +       key_jump_cmd = append_jump(desc, JUMP_JSL |
11391 +                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
11392 +       if (cdata->key_inline)
11393 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11394 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11395 +       else
11396 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11397 +                          KEY_DEST_CLASS_REG);
11398 +       set_jump_tgt_here(desc, key_jump_cmd);
11399 +
11400 +       /* class 1 operation */
11401 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11402 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11403 +
11404 +       if (is_qi) {
11405 +               u32 *wait_load_cmd;
11406 +
11407 +               /* REG3 = assoclen */
11408 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11409 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11410 +                               (4 << LDST_OFFSET_SHIFT));
11411 +
11412 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11413 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11414 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11415 +                                           JUMP_COND_NIFP);
11416 +               set_jump_tgt_here(desc, wait_load_cmd);
11417 +
11418 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11419 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11420 +       }
11421 +
11422 +       /* if assoclen is ZERO, skip reading the assoc data */
11423 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11424 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11425 +                                                JUMP_COND_MATH_Z);
11426 +
11427 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11428 +
11429 +       /* skip assoc data */
11430 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11431 +
11432 +       /* read assoc data */
11433 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11434 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11435 +
11436 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11437 +
11438 +       /* cryptlen = seqoutlen - assoclen */
11439 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11440 +
11441 +       /* jump to zero-payload command if cryptlen is zero */
11442 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11443 +                                           JUMP_COND_MATH_Z);
11444 +
11445 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11446 +
11447 +       /* store encrypted data */
11448 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11449 +
11450 +       /* read payload data */
11451 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11452 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11453 +
11454 +       /* zero-payload command */
11455 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11456 +
11457 +       /* read ICV */
11458 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11459 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11460 +
11461 +#ifdef DEBUG
11462 +       print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11463 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11464 +#endif
11465 +}
11466 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11467 +
11468 +/**
11469 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11470 + *                             (non-protocol).
11471 + * @desc: pointer to buffer used for descriptor construction
11472 + * @cdata: pointer to block cipher transform definitions
11473 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11474 + * @ivsize: initialization vector size
11475 + * @icvsize: integrity check value (ICV) size (truncated or full)
11476 + * @is_qi: true when called from caam/qi
11477 + */
11478 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11479 +                              unsigned int ivsize, unsigned int icvsize,
11480 +                              const bool is_qi)
11481 +{
11482 +       u32 *key_jump_cmd;
11483 +
11484 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11485 +
11486 +       /* Skip key loading if it is loaded due to sharing */
11487 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11488 +                                  JUMP_COND_SHRD);
11489 +       if (cdata->key_inline)
11490 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11491 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11492 +       else
11493 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11494 +                          KEY_DEST_CLASS_REG);
11495 +       set_jump_tgt_here(desc, key_jump_cmd);
11496 +
11497 +       /* Class 1 operation */
11498 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11499 +                        OP_ALG_ENCRYPT);
11500 +
11501 +       if (is_qi) {
11502 +               u32 *wait_load_cmd;
11503 +
11504 +               /* REG3 = assoclen */
11505 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11506 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11507 +                               (4 << LDST_OFFSET_SHIFT));
11508 +
11509 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11510 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11511 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11512 +                                           JUMP_COND_NIFP);
11513 +               set_jump_tgt_here(desc, wait_load_cmd);
11514 +
11515 +               /* Read salt and IV */
11516 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11517 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11518 +                                       FIFOLD_TYPE_IV);
11519 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11520 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11521 +       }
11522 +
11523 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11524 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11525 +
11526 +       /* Read assoc data */
11527 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11528 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11529 +
11530 +       /* Skip IV */
11531 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11532 +
11533 +       /* Will read cryptlen bytes */
11534 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11535 +
11536 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11537 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11538 +
11539 +       /* Skip assoc data */
11540 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11541 +
11542 +       /* cryptlen = seqoutlen - assoclen */
11543 +       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11544 +
11545 +       /* Write encrypted data */
11546 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11547 +
11548 +       /* Read payload data */
11549 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11550 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11551 +
11552 +       /* Write ICV */
11553 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11554 +                        LDST_SRCDST_BYTE_CONTEXT);
11555 +
11556 +#ifdef DEBUG
11557 +       print_hex_dump(KERN_ERR,
11558 +                      "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11559 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11560 +#endif
11561 +}
11562 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
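
The two FIFO LOADs above reassemble the RFC 4106 nonce: a 4-byte salt stored
right after the AES key material, followed by the packet's 8-byte explicit
IV. A standalone sketch of that 12-byte nonce layout (illustrative only):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* AES-128 key followed by the 4-byte salt, as in key material */
            unsigned char key_material[16 + 4] = { 0 };
            unsigned char iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  /* per packet */
            unsigned char nonce[12];
            int i;

            memcpy(nonce, key_material + 16, 4);    /* salt */
            memcpy(nonce + 4, iv, 8);               /* explicit IV */

            for (i = 0; i < 12; i++)
                    printf("%02x", nonce[i]);
            printf("\n");
            return 0;
    }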
11563 +
11564 +/**
11565 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11566 + *                             (non-protocol).
11567 + * @desc: pointer to buffer used for descriptor construction
11568 + * @cdata: pointer to block cipher transform definitions
11569 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11570 + * @ivsize: initialization vector size
11571 + * @icvsize: integrity check value (ICV) size (truncated or full)
11572 + * @is_qi: true when called from caam/qi
11573 + */
11574 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11575 +                              unsigned int ivsize, unsigned int icvsize,
11576 +                              const bool is_qi)
11577 +{
11578 +       u32 *key_jump_cmd;
11579 +
11580 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11581 +
11582 +       /* Skip key loading if it is loaded due to sharing */
11583 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11584 +                                  JUMP_COND_SHRD);
11585 +       if (cdata->key_inline)
11586 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11587 +                                 cdata->keylen, CLASS_1 |
11588 +                                 KEY_DEST_CLASS_REG);
11589 +       else
11590 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11591 +                          KEY_DEST_CLASS_REG);
11592 +       set_jump_tgt_here(desc, key_jump_cmd);
11593 +
11594 +       /* Class 1 operation */
11595 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11596 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11597 +
11598 +       if (is_qi) {
11599 +               u32 *wait_load_cmd;
11600 +
11601 +               /* REG3 = assoclen */
11602 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11603 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11604 +                               (4 << LDST_OFFSET_SHIFT));
11605 +
11606 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11607 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11608 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11609 +                                           JUMP_COND_NIFP);
11610 +               set_jump_tgt_here(desc, wait_load_cmd);
11611 +
11612 +               /* Read salt and IV */
11613 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11614 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11615 +                                       FIFOLD_TYPE_IV);
11616 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11617 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11618 +       }
11619 +
11620 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11621 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11622 +
11623 +       /* Read assoc data */
11624 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11625 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11626 +
11627 +       /* Skip IV */
11628 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11629 +
11630 +       /* Will read cryptlen bytes */
11631 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11632 +
11633 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11634 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11635 +
11636 +       /* Skip assoc data */
11637 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11638 +
11639 +       /* Will write cryptlen bytes */
11640 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11641 +
11642 +       /* Store payload data */
11643 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11644 +
11645 +       /* Read encrypted data */
11646 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11647 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11648 +
11649 +       /* Read ICV */
11650 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11651 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11652 +
11653 +#ifdef DEBUG
11654 +       print_hex_dump(KERN_ERR,
11655 +                      "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11656 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11657 +#endif
11658 +}
11659 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11660 +
11661 +/**
11662 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11663 + *                             (non-protocol).
11664 + * @desc: pointer to buffer used for descriptor construction
11665 + * @cdata: pointer to block cipher transform definitions
11666 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11667 + * @ivsize: initialization vector size
11668 + * @icvsize: integrity check value (ICV) size (truncated or full)
11669 + * @is_qi: true when called from caam/qi
11670 + */
11671 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11672 +                              unsigned int ivsize, unsigned int icvsize,
11673 +                              const bool is_qi)
11674 +{
11675 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11676 +
11677 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11678 +
11679 +       /* Skip key loading if it is loaded due to sharing */
11680 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11681 +                                  JUMP_COND_SHRD);
11682 +       if (cdata->key_inline)
11683 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11684 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11685 +       else
11686 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11687 +                          KEY_DEST_CLASS_REG);
11688 +       set_jump_tgt_here(desc, key_jump_cmd);
11689 +
11690 +       /* Class 1 operation */
11691 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11692 +                        OP_ALG_ENCRYPT);
11693 +
11694 +       if (is_qi) {
11695 +               /* assoclen is not needed, skip it */
11696 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11697 +
11698 +               /* Read salt and IV */
11699 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11700 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11701 +                                       FIFOLD_TYPE_IV);
11702 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11703 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11704 +       }
11705 +
11706 +       /* assoclen + cryptlen = seqinlen */
11707 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11708 +
11709 +       /*
11710 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
11711 +        * thus need to do some magic, i.e. self-patch the descriptor
11712 +        * buffer.
11713 +        */
11714 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11715 +                                   (0x6 << MOVE_LEN_SHIFT));
11716 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11717 +                                    (0x8 << MOVE_LEN_SHIFT));
11718 +
11719 +       /* Will read assoclen + cryptlen bytes */
11720 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11721 +
11722 +       /* Will write assoclen + cryptlen bytes */
11723 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11724 +
11725 +       /* Read and write assoclen + cryptlen bytes */
11726 +       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11727 +
11728 +       set_move_tgt_here(desc, read_move_cmd);
11729 +       set_move_tgt_here(desc, write_move_cmd);
11730 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11731 +       /* Move payload data to OFIFO */
11732 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11733 +
11734 +       /* Write ICV */
11735 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11736 +                        LDST_SRCDST_BYTE_CONTEXT);
11737 +
11738 +#ifdef DEBUG
11739 +       print_hex_dump(KERN_ERR,
11740 +                      "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11741 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11742 +#endif
11743 +}
11744 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11745 +
11746 +/**
11747 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11748 + *                             (non-protocol).
11749 + * @desc: pointer to buffer used for descriptor construction
11750 + * @cdata: pointer to block cipher transform definitions
11751 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11752 + * @ivsize: initialization vector size
11753 + * @icvsize: integrity check value (ICV) size (truncated or full)
11754 + * @is_qi: true when called from caam/qi
11755 + */
11756 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11757 +                              unsigned int ivsize, unsigned int icvsize,
11758 +                              const bool is_qi)
11759 +{
11760 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11761 +
11762 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11763 +
11764 +       /* Skip key loading if it is loaded due to sharing */
11765 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11766 +                                  JUMP_COND_SHRD);
11767 +       if (cdata->key_inline)
11768 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11769 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11770 +       else
11771 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11772 +                          KEY_DEST_CLASS_REG);
11773 +       set_jump_tgt_here(desc, key_jump_cmd);
11774 +
11775 +       /* Class 1 operation */
11776 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11777 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11778 +
11779 +       if (is_qi) {
11780 +               /* assoclen is not needed, skip it */
11781 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11782 +
11783 +               /* Read salt and IV */
11784 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11785 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11786 +                                       FIFOLD_TYPE_IV);
11787 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11788 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11789 +       }
11790 +
11791 +       /* assoclen + cryptlen = seqoutlen */
11792 +       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11793 +
11794 +       /*
11795 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
11796 +        * thus need to do some magic, i.e. self-patch the descriptor
11797 +        * buffer.
11798 +        */
11799 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11800 +                                   (0x6 << MOVE_LEN_SHIFT));
11801 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11802 +                                    (0x8 << MOVE_LEN_SHIFT));
11803 +
11804 +       /* Will read assoclen + cryptlen bytes */
11805 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11806 +
11807 +       /* Will write assoclen + cryptlen bytes */
11808 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11809 +
11810 +       /* Store payload data */
11811 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11812 +
11813 +       /* In-snoop assoclen + cryptlen data */
11814 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11815 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11816 +
11817 +       set_move_tgt_here(desc, read_move_cmd);
11818 +       set_move_tgt_here(desc, write_move_cmd);
11819 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11820 +       /* Move payload data to OFIFO */
11821 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11822 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11823 +
11824 +       /* Read ICV */
11825 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11826 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11827 +
11828 +#ifdef DEBUG
11829 +       print_hex_dump(KERN_ERR,
11830 +                      "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11831 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11832 +#endif
11833 +}
11834 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11835 +
11836 +/*
11837 + * For ablkcipher encrypt and decrypt, read from req->src and
11838 + * write to req->dst
11839 + */
11840 +static inline void ablkcipher_append_src_dst(u32 *desc)
11841 +{
11842 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11843 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11844 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11845 +                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11846 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11847 +}
11848 +
11849 +/**
11850 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11851 + * @desc: pointer to buffer used for descriptor construction
11852 + * @cdata: pointer to block cipher transform definitions
11853 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11854 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11855 + * @ivsize: initialization vector size
11856 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11857 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11858 + */
11859 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11860 +                                 unsigned int ivsize, const bool is_rfc3686,
11861 +                                 const u32 ctx1_iv_off)
11862 +{
11863 +       u32 *key_jump_cmd;
11864 +
11865 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11866 +       /* Skip if already shared */
11867 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11868 +                                  JUMP_COND_SHRD);
11869 +
11870 +       /* Load class1 key only */
11871 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11872 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11873 +
11874 +       /* Load nonce into CONTEXT1 reg */
11875 +       if (is_rfc3686) {
11876 +               u8 *nonce = cdata->key_virt + cdata->keylen;
11877 +
11878 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11879 +                                  LDST_CLASS_IND_CCB |
11880 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11881 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11882 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11883 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11884 +       }
11885 +
11886 +       set_jump_tgt_here(desc, key_jump_cmd);
11887 +
11888 +       /* Load iv */
11889 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11890 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11891 +
11892 +       /* Load counter into CONTEXT1 reg */
11893 +       if (is_rfc3686)
11894 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11895 +                                    LDST_SRCDST_BYTE_CONTEXT |
11896 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11897 +                                     LDST_OFFSET_SHIFT));
11898 +
11899 +       /* Load operation */
11900 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11901 +                        OP_ALG_ENCRYPT);
11902 +
11903 +       /* Perform operation */
11904 +       ablkcipher_append_src_dst(desc);
11905 +
11906 +#ifdef DEBUG
11907 +       print_hex_dump(KERN_ERR,
11908 +                      "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
11909 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11910 +#endif
11911 +}
11912 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
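
For the rfc3686 case, the nonce move and the trailing be32 load reproduce the
standard RFC 3686 counter block in CONTEXT1: 4-byte nonce, 8-byte IV, 4-byte
big-endian counter starting at 1. A standalone sketch (illustrative only):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char nonce[4] = { 0xde, 0xad, 0xbe, 0xef }; /* from key */
            unsigned char iv[8] = { 0 };            /* per-request IV */
            unsigned char ctr_blk[16];
            int i;

            memcpy(ctr_blk, nonce, 4);
            memcpy(ctr_blk + 4, iv, 8);
            /* counter = 1, big endian, as append_load_imm_be32() loads above */
            ctr_blk[12] = 0; ctr_blk[13] = 0; ctr_blk[14] = 0; ctr_blk[15] = 1;

            for (i = 0; i < 16; i++)
                    printf("%02x", ctr_blk[i]);
            printf("\n");
            return 0;
    }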
11913 +
11914 +/**
11915 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
11916 + * @desc: pointer to buffer used for descriptor construction
11917 + * @cdata: pointer to block cipher transform definitions
11918 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11919 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11920 + * @ivsize: initialization vector size
11921 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11922 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11923 + */
11924 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
11925 +                                 unsigned int ivsize, const bool is_rfc3686,
11926 +                                 const u32 ctx1_iv_off)
11927 +{
11928 +       u32 *key_jump_cmd;
11929 +
11930 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11931 +       /* Skip if already shared */
11932 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11933 +                                  JUMP_COND_SHRD);
11934 +
11935 +       /* Load class1 key only */
11936 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11937 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11938 +
11939 +       /* Load nonce into CONTEXT1 reg */
11940 +       if (is_rfc3686) {
11941 +               u8 *nonce = cdata->key_virt + cdata->keylen;
11942 +
11943 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11944 +                                  LDST_CLASS_IND_CCB |
11945 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11946 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11947 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11948 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11949 +       }
11950 +
11951 +       set_jump_tgt_here(desc, key_jump_cmd);
11952 +
11953 +       /* load IV */
11954 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11955 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11956 +
11957 +       /* Load counter into CONTEXT1 reg */
11958 +       if (is_rfc3686)
11959 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11960 +                                    LDST_SRCDST_BYTE_CONTEXT |
11961 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11962 +                                     LDST_OFFSET_SHIFT));
11963 +
11964 +       /* Choose operation */
11965 +       if (ctx1_iv_off)
11966 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11967 +                                OP_ALG_DECRYPT);
11968 +       else
11969 +               append_dec_op1(desc, cdata->algtype);
11970 +
11971 +       /* Perform operation */
11972 +       ablkcipher_append_src_dst(desc);
11973 +
11974 +#ifdef DEBUG
11975 +       print_hex_dump(KERN_ERR,
11976 +                      "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
11977 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11978 +#endif
11979 +}
11980 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
11981 +
11982 +/**
11983 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
11984 + *                                   with HW-generated initialization vector.
11985 + * @desc: pointer to buffer used for descriptor construction
11986 + * @cdata: pointer to block cipher transform definitions
11987 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11988 + *         with OP_ALG_AAI_CBC.
11989 + * @ivsize: initialization vector size
11990 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11991 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11992 + */
11993 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
11994 +                                    unsigned int ivsize, const bool is_rfc3686,
11995 +                                    const u32 ctx1_iv_off)
11996 +{
11997 +       u32 *key_jump_cmd, geniv;
11998 +
11999 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12000 +       /* Skip if already shared */
12001 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12002 +                                  JUMP_COND_SHRD);
12003 +
12004 +       /* Load class1 key only */
12005 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12006 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12007 +
12008 +       /* Load Nonce into CONTEXT1 reg */
12009 +       if (is_rfc3686) {
12010 +               u8 *nonce = cdata->key_virt + cdata->keylen;
12011 +
12012 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12013 +                                  LDST_CLASS_IND_CCB |
12014 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12015 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12016 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12017 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12018 +       }
12019 +       set_jump_tgt_here(desc, key_jump_cmd);
12020 +
12021 +       /* Generate IV */
12022 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12023 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12024 +               (ivsize << NFIFOENTRY_DLEN_SHIFT);
12025 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12026 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12027 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12028 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12029 +                   MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12030 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12031 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12032 +
12033 +       /* Copy generated IV to memory */
12034 +       append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12035 +                        LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12036 +
12037 +       /* Load Counter into CONTEXT1 reg */
12038 +       if (is_rfc3686)
12039 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12040 +                                    LDST_SRCDST_BYTE_CONTEXT |
12041 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12042 +                                     LDST_OFFSET_SHIFT));
12043 +
12044 +       if (ctx1_iv_off)
12045 +               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12046 +                           (1 << JUMP_OFFSET_SHIFT));
12047 +
12048 +       /* Load operation */
12049 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12050 +                        OP_ALG_ENCRYPT);
12051 +
12052 +       /* Perform operation */
12053 +       ablkcipher_append_src_dst(desc);
12054 +
12055 +#ifdef DEBUG
12056 +       print_hex_dump(KERN_ERR,
12057 +                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12058 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12059 +#endif
12060 +}
12061 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12062 +
12063 +/**
12064 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12065 + *                                    descriptor
12066 + * @desc: pointer to buffer used for descriptor construction
12067 + * @cdata: pointer to block cipher transform definitions
12068 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12069 + */
12070 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12071 +{
12072 +       __be64 sector_size = cpu_to_be64(512);
12073 +       u32 *key_jump_cmd;
12074 +
12075 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12076 +       /* Skip if already shared */
12077 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12078 +                                  JUMP_COND_SHRD);
12079 +
12080 +       /* Load class1 keys only */
12081 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12082 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12083 +
12084 +       /* Load sector size at context offset 40 bytes (0x28) */
12085 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12086 +                          LDST_SRCDST_BYTE_CONTEXT |
12087 +                          (0x28 << LDST_OFFSET_SHIFT));
12088 +
12089 +       set_jump_tgt_here(desc, key_jump_cmd);
12090 +
12091 +       /*
12092 +        * create sequence for loading the sector index
12093 +        * Upper 8B of IV - will be used as sector index
12094 +        * Lower 8B of IV - will be discarded
12095 +        */
12096 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12097 +                       (0x20 << LDST_OFFSET_SHIFT));
12098 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12099 +
12100 +       /* Load operation */
12101 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12102 +                        OP_ALG_ENCRYPT);
12103 +
12104 +       /* Perform operation */
12105 +       ablkcipher_append_src_dst(desc);
12106 +
12107 +#ifdef DEBUG
12108 +       print_hex_dump(KERN_ERR,
12109 +                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12110 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12111 +#endif
12112 +}
12113 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
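
The sequence above consumes only the upper 8 bytes of the 16-byte XTS IV as
the sector index, against the fixed 512-byte sector size loaded earlier. A
standalone sketch of one plausible big-endian encoding of a sector number
into those bytes; the byte order inside the IV is a caller convention, not
something this descriptor dictates:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t byte_offset = 4096 + 512;    /* position on the medium */
            uint64_t sector = byte_offset / 512;  /* matches sector_size */
            unsigned char iv[16] = { 0 };         /* lower 8B are discarded */
            int i;

            for (i = 0; i < 8; i++)
                    iv[i] = (unsigned char)(sector >> (56 - 8 * i));

            printf("sector %llu -> IV[0..7]=", (unsigned long long)sector);
            for (i = 0; i < 8; i++)
                    printf("%02x", iv[i]);
            printf("\n");
            return 0;
    }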
12114 +
12115 +/**
12116 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12117 + *                                    descriptor
12118 + * @desc: pointer to buffer used for descriptor construction
12119 + * @cdata: pointer to block cipher transform definitions
12120 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12121 + */
12122 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12123 +{
12124 +       __be64 sector_size = cpu_to_be64(512);
12125 +       u32 *key_jump_cmd;
12126 +
12127 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12128 +       /* Skip if already shared */
12129 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12130 +                                  JUMP_COND_SHRD);
12131 +
12132 +       /* Load class1 keys only */
12133 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12134 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12135 +
12136 +       /* Load sector size at context offset 40 bytes (0x28) */
12137 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12138 +                          LDST_SRCDST_BYTE_CONTEXT |
12139 +                          (0x28 << LDST_OFFSET_SHIFT));
12140 +
12141 +       set_jump_tgt_here(desc, key_jump_cmd);
12142 +
12143 +       /*
12144 +        * create sequence for loading the sector index
12145 +        * Upper 8B of IV - will be used as sector index
12146 +        * Lower 8B of IV - will be discarded
12147 +        */
12148 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12149 +                       (0x20 << LDST_OFFSET_SHIFT));
12150 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12151 +
12152 +       /* Load operation */
12153 +       append_dec_op1(desc, cdata->algtype);
12154 +
12155 +       /* Perform operation */
12156 +       ablkcipher_append_src_dst(desc);
12157 +
12158 +#ifdef DEBUG
12159 +       print_hex_dump(KERN_ERR,
12160 +                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12161 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12162 +#endif
12163 +}
12164 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12165 +
12166 +MODULE_LICENSE("GPL");
12167 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12168 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12169 diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
12170 new file mode 100644
12171 index 00000000..6b436f65
12172 --- /dev/null
12173 +++ b/drivers/crypto/caam/caamalg_desc.h
12174 @@ -0,0 +1,127 @@
12175 +/*
12176 + * Shared descriptors for aead, ablkcipher algorithms
12177 + *
12178 + * Copyright 2016 NXP
12179 + */
12180 +
12181 +#ifndef _CAAMALG_DESC_H_
12182 +#define _CAAMALG_DESC_H_
12183 +
12184 +/* length of descriptors text */
12185 +#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
12186 +#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12187 +#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12188 +#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12189 +#define DESC_QI_AEAD_ENC_LEN           (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12190 +#define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12191 +#define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12192 +
12193 +#define DESC_TLS_BASE                  (4 * CAAM_CMD_SZ)
12194 +#define DESC_TLS10_ENC_LEN             (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12195 +
12196 +/* Note: Nonce is counted in cdata.keylen */
12197 +#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
12198 +
12199 +#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
12200 +#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12201 +#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12202 +
12203 +#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
12204 +#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12205 +#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12206 +#define DESC_QI_GCM_ENC_LEN            (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12207 +#define DESC_QI_GCM_DEC_LEN            (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12208 +
12209 +#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
12210 +#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12211 +#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12212 +#define DESC_QI_RFC4106_ENC_LEN                (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12213 +#define DESC_QI_RFC4106_DEC_LEN                (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12214 +
12215 +#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
12216 +#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12217 +#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12218 +#define DESC_QI_RFC4543_ENC_LEN                (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12219 +#define DESC_QI_RFC4543_DEC_LEN                (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12220 +
12221 +#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
12222 +#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
12223 +                                        20 * CAAM_CMD_SZ)
12224 +#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
12225 +                                        15 * CAAM_CMD_SZ)
12226 +
12227 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12228 +                                unsigned int icvsize);
12229 +
12230 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12231 +                                unsigned int icvsize);
12232 +
12233 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12234 +                           struct alginfo *adata, unsigned int ivsize,
12235 +                           unsigned int icvsize, const bool is_rfc3686,
12236 +                           u32 *nonce, const u32 ctx1_iv_off,
12237 +                           const bool is_qi);
12238 +
12239 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12240 +                           struct alginfo *adata, unsigned int ivsize,
12241 +                           unsigned int icvsize, const bool geniv,
12242 +                           const bool is_rfc3686, u32 *nonce,
12243 +                           const u32 ctx1_iv_off, const bool is_qi);
12244 +
12245 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12246 +                              struct alginfo *adata, unsigned int ivsize,
12247 +                              unsigned int icvsize, const bool is_rfc3686,
12248 +                              u32 *nonce, const u32 ctx1_iv_off,
12249 +                              const bool is_qi);
12250 +
12251 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12252 +                          struct alginfo *adata, unsigned int assoclen,
12253 +                          unsigned int ivsize, unsigned int authsize,
12254 +                          unsigned int blocksize);
12255 +
12256 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12257 +                          struct alginfo *adata, unsigned int assoclen,
12258 +                          unsigned int ivsize, unsigned int authsize,
12259 +                          unsigned int blocksize);
12260 +
12261 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12262 +                          unsigned int ivsize, unsigned int icvsize,
12263 +                          const bool is_qi);
12264 +
12265 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12266 +                          unsigned int ivsize, unsigned int icvsize,
12267 +                          const bool is_qi);
12268 +
12269 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12270 +                              unsigned int ivsize, unsigned int icvsize,
12271 +                              const bool is_qi);
12272 +
12273 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12274 +                              unsigned int ivsize, unsigned int icvsize,
12275 +                              const bool is_qi);
12276 +
12277 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12278 +                              unsigned int ivsize, unsigned int icvsize,
12279 +                              const bool is_qi);
12280 +
12281 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12282 +                              unsigned int ivsize, unsigned int icvsize,
12283 +                              const bool is_qi);
12284 +
12285 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12286 +                                 unsigned int ivsize, const bool is_rfc3686,
12287 +                                 const u32 ctx1_iv_off);
12288 +
12289 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12290 +                                 unsigned int ivsize, const bool is_rfc3686,
12291 +                                 const u32 ctx1_iv_off);
12292 +
12293 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12294 +                                    unsigned int ivsize, const bool is_rfc3686,
12295 +                                    const u32 ctx1_iv_off);
12296 +
12297 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12298 +
12299 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12300 +
12301 +#endif /* _CAAMALG_DESC_H_ */
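All constructors declared here follow one convention: the caller fills a struct alginfo with the key reference and algorithm type, then passes a descriptor buffer large enough for the corresponding DESC_*_LEN plus any inlined key. A minimal sketch, mirroring the field usage in xts_ablkcipher_setkey() from caamalg_qi.c below; the buffer sizing and the key buffer are assumptions:

	u32 desc[DESC_MAX_USED_LEN];	/* sized as in caamalg_qi.c below */
	struct alginfo cdata = { 0 };

	cdata.algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS;
	cdata.keylen     = 2 * AES_MIN_KEY_SIZE;	/* two 128-bit key halves */
	cdata.key_virt   = xts_key;			/* hypothetical key buffer */
	cdata.key_inline = true;

	cnstr_shdsc_xts_ablkcipher_decap(desc, &cdata);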
12302 diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
12303 new file mode 100644
12304 index 00000000..d6a9b0c5
12305 --- /dev/null
12306 +++ b/drivers/crypto/caam/caamalg_qi.c
12307 @@ -0,0 +1,2877 @@
12308 +/*
12309 + * Freescale FSL CAAM support for crypto API over QI backend.
12310 + * Based on caamalg.c
12311 + *
12312 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12313 + * Copyright 2016-2017 NXP
12314 + */
12315 +
12316 +#include "compat.h"
12317 +#include "ctrl.h"
12318 +#include "regs.h"
12319 +#include "intern.h"
12320 +#include "desc_constr.h"
12321 +#include "error.h"
12322 +#include "sg_sw_qm.h"
12323 +#include "key_gen.h"
12324 +#include "qi.h"
12325 +#include "jr.h"
12326 +#include "caamalg_desc.h"
12327 +
12328 +/*
12329 + * crypto alg
12330 + */
12331 +#define CAAM_CRA_PRIORITY              2000
12332 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12333 +#define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
12334 +                                        SHA512_DIGEST_SIZE * 2)
12335 +
12336 +#define DESC_MAX_USED_BYTES            (DESC_QI_AEAD_GIVENC_LEN + \
12337 +                                        CAAM_MAX_KEY_SIZE)
12338 +#define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12339 +
12340 +struct caam_alg_entry {
12341 +       int class1_alg_type;
12342 +       int class2_alg_type;
12343 +       bool rfc3686;
12344 +       bool geniv;
12345 +};
12346 +
12347 +struct caam_aead_alg {
12348 +       struct aead_alg aead;
12349 +       struct caam_alg_entry caam;
12350 +       bool registered;
12351 +};
12352 +
12353 +/*
12354 + * per-session context
12355 + */
12356 +struct caam_ctx {
12357 +       struct device *jrdev;
12358 +       u32 sh_desc_enc[DESC_MAX_USED_LEN];
12359 +       u32 sh_desc_dec[DESC_MAX_USED_LEN];
12360 +       u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12361 +       u8 key[CAAM_MAX_KEY_SIZE];
12362 +       dma_addr_t key_dma;
12363 +       struct alginfo adata;
12364 +       struct alginfo cdata;
12365 +       unsigned int authsize;
12366 +       struct device *qidev;
12367 +       spinlock_t lock;        /* Protects multiple init of driver context */
12368 +       struct caam_drv_ctx *drv_ctx[NUM_OP];
12369 +};
12370 +
12371 +static int aead_set_sh_desc(struct crypto_aead *aead)
12372 +{
12373 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12374 +                                                typeof(*alg), aead);
12375 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12376 +       unsigned int ivsize = crypto_aead_ivsize(aead);
12377 +       u32 ctx1_iv_off = 0;
12378 +       u32 *nonce = NULL;
12379 +       unsigned int data_len[2];
12380 +       u32 inl_mask;
12381 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12382 +                              OP_ALG_AAI_CTR_MOD128);
12383 +       const bool is_rfc3686 = alg->caam.rfc3686;
12384 +
12385 +       if (!ctx->cdata.keylen || !ctx->authsize)
12386 +               return 0;
12387 +
12388 +       /*
12389 +        * AES-CTR needs to load IV in CONTEXT1 reg
12390 +        * at an offset of 128 bits (16 bytes)
12391 +        * CONTEXT1[255:128] = IV
12392 +        */
12393 +       if (ctr_mode)
12394 +               ctx1_iv_off = 16;
12395 +
12396 +       /*
12397 +        * RFC3686 specific:
12398 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12399 +        */
12400 +       if (is_rfc3686) {
12401 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12402 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12403 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12404 +       }
12405 +
12406 +       data_len[0] = ctx->adata.keylen_pad;
12407 +       data_len[1] = ctx->cdata.keylen;
12408 +
12409 +       if (alg->caam.geniv)
12410 +               goto skip_enc;
12411 +
12412 +       /* aead_encrypt shared descriptor */
12413 +       if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12414 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12415 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12416 +                             ARRAY_SIZE(data_len)) < 0)
12417 +               return -EINVAL;
12418 +
12419 +       if (inl_mask & 1)
12420 +               ctx->adata.key_virt = ctx->key;
12421 +       else
12422 +               ctx->adata.key_dma = ctx->key_dma;
12423 +
12424 +       if (inl_mask & 2)
12425 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12426 +       else
12427 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12428 +
12429 +       ctx->adata.key_inline = !!(inl_mask & 1);
12430 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12431 +
12432 +       cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12433 +                              ivsize, ctx->authsize, is_rfc3686, nonce,
12434 +                              ctx1_iv_off, true);
12435 +
12436 +skip_enc:
12437 +       /* aead_decrypt shared descriptor */
12438 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12439 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12440 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12441 +                             ARRAY_SIZE(data_len)) < 0)
12442 +               return -EINVAL;
12443 +
12444 +       if (inl_mask & 1)
12445 +               ctx->adata.key_virt = ctx->key;
12446 +       else
12447 +               ctx->adata.key_dma = ctx->key_dma;
12448 +
12449 +       if (inl_mask & 2)
12450 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12451 +       else
12452 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12453 +
12454 +       ctx->adata.key_inline = !!(inl_mask & 1);
12455 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12456 +
12457 +       cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12458 +                              ivsize, ctx->authsize, alg->caam.geniv,
12459 +                              is_rfc3686, nonce, ctx1_iv_off, true);
12460 +
12461 +       if (!alg->caam.geniv)
12462 +               goto skip_givenc;
12463 +
12464 +       /* aead_givencrypt shared descriptor */
12465 +       if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12466 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12467 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12468 +                             ARRAY_SIZE(data_len)) < 0)
12469 +               return -EINVAL;
12470 +
12471 +       if (inl_mask & 1)
12472 +               ctx->adata.key_virt = ctx->key;
12473 +       else
12474 +               ctx->adata.key_dma = ctx->key_dma;
12475 +
12476 +       if (inl_mask & 2)
12477 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12478 +       else
12479 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12480 +
12481 +       ctx->adata.key_inline = !!(inl_mask & 1);
12482 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12483 +
12484 +       cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12485 +                                 ivsize, ctx->authsize, is_rfc3686, nonce,
12486 +                                 ctx1_iv_off, true);
12487 +
12488 +skip_givenc:
12489 +       return 0;
12490 +}
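desc_inline_query() reports, per key, whether it still fits inline next to the descriptor text: bit 0 of inl_mask covers data_len[0] (the split authentication key), bit 1 covers data_len[1] (the cipher key). The three blocks above decode the mask identically; a condensed sketch of that decode as a helper (the helper itself is illustrative, not part of this patch):

	static void caam_set_key_refs(struct caam_ctx *ctx, u32 inl_mask)
	{
		if (inl_mask & 1)	/* auth (split) key fits inline */
			ctx->adata.key_virt = ctx->key;
		else
			ctx->adata.key_dma = ctx->key_dma;

		if (inl_mask & 2)	/* cipher key fits inline */
			ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
		else
			ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

		ctx->adata.key_inline = !!(inl_mask & 1);
		ctx->cdata.key_inline = !!(inl_mask & 2);
	}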
12491 +
12492 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12493 +{
12494 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12495 +
12496 +       ctx->authsize = authsize;
12497 +       aead_set_sh_desc(authenc);
12498 +
12499 +       return 0;
12500 +}
12501 +
12502 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12503 +                      unsigned int keylen)
12504 +{
12505 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12506 +       struct device *jrdev = ctx->jrdev;
12507 +       struct crypto_authenc_keys keys;
12508 +       int ret = 0;
12509 +
12510 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12511 +               goto badkey;
12512 +
12513 +#ifdef DEBUG
12514 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12515 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12516 +               keys.authkeylen);
12517 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12518 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12519 +#endif
12520 +
12521 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12522 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12523 +                           keys.enckeylen);
12524 +       if (ret)
12525 +               goto badkey;
12526 +
12527 +       /* append encryption key to auth split key */
12528 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12529 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12530 +                                  keys.enckeylen, DMA_TO_DEVICE);
12531 +#ifdef DEBUG
12532 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12533 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12534 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12535 +#endif
12536 +
12537 +       ctx->cdata.keylen = keys.enckeylen;
12538 +
12539 +       ret = aead_set_sh_desc(aead);
12540 +       if (ret)
12541 +               goto badkey;
12542 +
12543 +       /* Now update the driver contexts with the new shared descriptor */
12544 +       if (ctx->drv_ctx[ENCRYPT]) {
12545 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12546 +                                         ctx->sh_desc_enc);
12547 +               if (ret) {
12548 +                       dev_err(jrdev, "driver enc context update failed\n");
12549 +                       goto badkey;
12550 +               }
12551 +       }
12552 +
12553 +       if (ctx->drv_ctx[DECRYPT]) {
12554 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12555 +                                         ctx->sh_desc_dec);
12556 +               if (ret) {
12557 +                       dev_err(jrdev, "driver dec context update failed\n");
12558 +                       goto badkey;
12559 +               }
12560 +       }
12561 +
12562 +       return ret;
12563 +badkey:
12564 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12565 +       return -EINVAL;
12566 +}
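aead_setkey() receives its key in the generic authenc blob format that crypto_authenc_extractkeys() parses: an rtattr header of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption key length, followed by the authentication key and then the encryption key. A minimal sketch of packing such a blob on the caller side; keybuf (a u8 buffer) and the key variables are assumed to exist:

	struct rtattr *rta = (struct rtattr *)keybuf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);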
12567 +
12568 +static int tls_set_sh_desc(struct crypto_aead *tls)
12569 +{
12570 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12571 +       unsigned int ivsize = crypto_aead_ivsize(tls);
12572 +       unsigned int blocksize = crypto_aead_blocksize(tls);
12573 +       unsigned int assoclen = 13; /* always 13 bytes for TLS */
12574 +       unsigned int data_len[2];
12575 +       u32 inl_mask;
12576 +
12577 +       if (!ctx->cdata.keylen || !ctx->authsize)
12578 +               return 0;
12579 +
12580 +       /*
12581 +        * TLS 1.0 encrypt shared descriptor
12582 +        * Job Descriptor and Shared Descriptor
12583 +        * must fit into the 64-word Descriptor h/w Buffer
12584 +        */
12585 +       data_len[0] = ctx->adata.keylen_pad;
12586 +       data_len[1] = ctx->cdata.keylen;
12587 +
12588 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12589 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
12590 +               return -EINVAL;
12591 +
12592 +       if (inl_mask & 1)
12593 +               ctx->adata.key_virt = ctx->key;
12594 +       else
12595 +               ctx->adata.key_dma = ctx->key_dma;
12596 +
12597 +       if (inl_mask & 2)
12598 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12599 +       else
12600 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12601 +
12602 +       ctx->adata.key_inline = !!(inl_mask & 1);
12603 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12604 +
12605 +       cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12606 +                             assoclen, ivsize, ctx->authsize, blocksize);
12607 +
12608 +       /*
12609 +        * TLS 1.0 decrypt shared descriptor
12610 +        * Keys do not fit inline, regardless of algorithms used
12611 +        */
12612 +       ctx->adata.key_dma = ctx->key_dma;
12613 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12614 +
12615 +       cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12616 +                             assoclen, ivsize, ctx->authsize, blocksize);
12617 +
12618 +       return 0;
12619 +}
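The fixed assoclen of 13 is the TLS 1.0 MAC pseudo-header authenticated ahead of the record payload: an 8-byte implicit sequence number, a 1-byte record type, a 2-byte protocol version and a 2-byte length. For reference, a sketch of that layout (the struct name is illustrative):

	struct tls_aad {
		__be64 seq_num;	/* implicit record sequence number */
		u8     type;	/* ContentType, e.g. 0x17 for application data */
		__be16 version;	/* 0x0301 for TLS 1.0 */
		__be16 len;	/* plaintext length */
	} __packed;		/* 8 + 1 + 2 + 2 = 13 bytes */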
12620 +
12621 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12622 +{
12623 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12624 +
12625 +       ctx->authsize = authsize;
12626 +       tls_set_sh_desc(tls);
12627 +
12628 +       return 0;
12629 +}
12630 +
12631 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12632 +                     unsigned int keylen)
12633 +{
12634 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12635 +       struct device *jrdev = ctx->jrdev;
12636 +       struct crypto_authenc_keys keys;
12637 +       int ret = 0;
12638 +
12639 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12640 +               goto badkey;
12641 +
12642 +#ifdef DEBUG
12643 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12644 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12645 +               keys.authkeylen);
12646 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12647 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12648 +#endif
12649 +
12650 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12651 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12652 +                           keys.enckeylen);
12653 +       if (ret)
12654 +               goto badkey;
12655 +
12656 +       /* append encryption key to auth split key */
12657 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12658 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12659 +                                  keys.enckeylen, DMA_TO_DEVICE);
12660 +
12661 +#ifdef DEBUG
12662 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12663 +               ctx->adata.keylen, ctx->adata.keylen_pad);
12664 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12665 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12666 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12667 +#endif
12668 +
12669 +       ctx->cdata.keylen = keys.enckeylen;
12670 +
12671 +       ret = tls_set_sh_desc(tls);
12672 +       if (ret)
12673 +               goto badkey;
12674 +
12675 +       /* Now update the driver contexts with the new shared descriptor */
12676 +       if (ctx->drv_ctx[ENCRYPT]) {
12677 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12678 +                                         ctx->sh_desc_enc);
12679 +               if (ret) {
12680 +                       dev_err(jrdev, "driver enc context update failed\n");
12681 +                       goto badkey;
12682 +               }
12683 +       }
12684 +
12685 +       if (ctx->drv_ctx[DECRYPT]) {
12686 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12687 +                                         ctx->sh_desc_dec);
12688 +               if (ret) {
12689 +                       dev_err(jrdev, "driver dec context update failed\n");
12690 +                       goto badkey;
12691 +               }
12692 +       }
12693 +
12694 +       return ret;
12695 +badkey:
12696 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12697 +       return -EINVAL;
12698 +}
12699 +
12700 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12701 +                            const u8 *key, unsigned int keylen)
12702 +{
12703 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12704 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12705 +       const char *alg_name = crypto_tfm_alg_name(tfm);
12706 +       struct device *jrdev = ctx->jrdev;
12707 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12708 +       u32 ctx1_iv_off = 0;
12709 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12710 +                              OP_ALG_AAI_CTR_MOD128);
12711 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12712 +       int ret = 0;
12713 +
12714 +       memcpy(ctx->key, key, keylen);
12715 +#ifdef DEBUG
12716 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12717 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12718 +#endif
12719 +       /*
12720 +        * AES-CTR needs to load IV in CONTEXT1 reg
12721 +        * at an offset of 128 bits (16 bytes)
12722 +        * CONTEXT1[255:128] = IV
12723 +        */
12724 +       if (ctr_mode)
12725 +               ctx1_iv_off = 16;
12726 +
12727 +       /*
12728 +        * RFC3686 specific:
12729 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12730 +        *      | *key = {KEY, NONCE}
12731 +        */
12732 +       if (is_rfc3686) {
12733 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12734 +               keylen -= CTR_RFC3686_NONCE_SIZE;
12735 +       }
12736 +
12737 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12738 +       ctx->cdata.keylen = keylen;
12739 +       ctx->cdata.key_virt = ctx->key;
12740 +       ctx->cdata.key_inline = true;
12741 +
12742 +       /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12743 +       cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12744 +                                    is_rfc3686, ctx1_iv_off);
12745 +       cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12746 +                                    is_rfc3686, ctx1_iv_off);
12747 +       cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12748 +                                       ivsize, is_rfc3686, ctx1_iv_off);
12749 +
12750 +       /* Now update the driver contexts with the new shared descriptor */
12751 +       if (ctx->drv_ctx[ENCRYPT]) {
12752 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12753 +                                         ctx->sh_desc_enc);
12754 +               if (ret) {
12755 +                       dev_err(jrdev, "driver enc context update failed\n");
12756 +                       goto badkey;
12757 +               }
12758 +       }
12759 +
12760 +       if (ctx->drv_ctx[DECRYPT]) {
12761 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12762 +                                         ctx->sh_desc_dec);
12763 +               if (ret) {
12764 +                       dev_err(jrdev, "driver dec context update failed\n");
12765 +                       goto badkey;
12766 +               }
12767 +       }
12768 +
12769 +       if (ctx->drv_ctx[GIVENCRYPT]) {
12770 +               ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12771 +                                         ctx->sh_desc_givenc);
12772 +               if (ret) {
12773 +                       dev_err(jrdev, "driver givenc context update failed\n");
12774 +                       goto badkey;
12775 +               }
12776 +       }
12777 +
12778 +       return ret;
12779 +badkey:
12780 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12781 +       return -EINVAL;
12782 +}
12783 +
12784 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12785 +                                const u8 *key, unsigned int keylen)
12786 +{
12787 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12788 +       struct device *jrdev = ctx->jrdev;
12789 +       int ret = 0;
12790 +
12791 +       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
12792 +               crypto_ablkcipher_set_flags(ablkcipher,
12793 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
12794 +               dev_err(jrdev, "key size mismatch\n");
12795 +               return -EINVAL;
12796 +       }
12797 +
12798 +       memcpy(ctx->key, key, keylen);
12799 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12800 +       ctx->cdata.keylen = keylen;
12801 +       ctx->cdata.key_virt = ctx->key;
12802 +       ctx->cdata.key_inline = true;
12803 +
12804 +       /* xts ablkcipher encrypt, decrypt shared descriptors */
12805 +       cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12806 +       cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12807 +
12808 +       /* Now update the driver contexts with the new shared descriptor */
12809 +       if (ctx->drv_ctx[ENCRYPT]) {
12810 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12811 +                                         ctx->sh_desc_enc);
12812 +               if (ret) {
12813 +                       dev_err(jrdev, "driver enc context update failed\n");
12814 +                       goto badkey;
12815 +               }
12816 +       }
12817 +
12818 +       if (ctx->drv_ctx[DECRYPT]) {
12819 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12820 +                                         ctx->sh_desc_dec);
12821 +               if (ret) {
12822 +                       dev_err(jrdev, "driver dec context update failed\n");
12823 +                       goto badkey;
12824 +               }
12825 +       }
12826 +
12827 +       return ret;
12828 +badkey:
12829 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12830 +       return -EINVAL;
12831 +}
12832 +
12833 +/*
12834 + * aead_edesc - s/w-extended aead descriptor
12835 + * @src_nents: number of segments in input scatterlist
12836 + * @dst_nents: number of segments in output scatterlist
12837 + * @iv_dma: dma address of iv for checking continuity and link table
12838 + * @qm_sg_bytes: length of dma mapped h/w link table
12839 + * @qm_sg_dma: bus physical mapped address of h/w link table
12840 + * @assoclen: associated data length, in CAAM endianness
12841 + * @assoclen_dma: bus physical mapped address of req->assoclen
12842 + * @drv_req: driver-specific request structure
12843 + * @sgt: the h/w link table
12844 + */
12845 +struct aead_edesc {
12846 +       int src_nents;
12847 +       int dst_nents;
12848 +       dma_addr_t iv_dma;
12849 +       int qm_sg_bytes;
12850 +       dma_addr_t qm_sg_dma;
12851 +       unsigned int assoclen;
12852 +       dma_addr_t assoclen_dma;
12853 +       struct caam_drv_req drv_req;
12854 +#define CAAM_QI_MAX_AEAD_SG                                            \
12855 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
12856 +        sizeof(struct qm_sg_entry))
12857 +       struct qm_sg_entry sgt[0];
12858 +};
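CAAM_QI_MAX_AEAD_SG bounds how many hardware S/G entries fit in the sgt[] tail of one edesc carved out of the QI memory cache: the leftover cache-object space divided by the entry size. Worked through with assumed values (CAAM_QI_MEMCACHE_SIZE of 768 as in qi.h, 16-byte struct qm_sg_entry, and an illustrative 64-byte offsetof):

	/* (768 - 64) / 16 == 44 S/G entries per extended descriptor
	 * under the assumed sizes above.
	 */
	pr_debug("max AEAD S/G entries per edesc: %lu\n",
		 (unsigned long)CAAM_QI_MAX_AEAD_SG);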
12859 +
12860 +/*
12861 + * tls_edesc - s/w-extended tls descriptor
12862 + * @src_nents: number of segments in input scatterlist
12863 + * @dst_nents: number of segments in output scatterlist
12864 + * @iv_dma: dma address of iv for checking continuity and link table
12865 + * @qm_sg_bytes: length of dma mapped h/w link table
12866 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
12867 + * @qm_sg_dma: bus physical mapped address of h/w link table
12868 + * @drv_req: driver-specific request structure
12869 + * @sgt: the h/w link table
12870 + */
12871 +struct tls_edesc {
12872 +       int src_nents;
12873 +       int dst_nents;
12874 +       dma_addr_t iv_dma;
12875 +       int qm_sg_bytes;
12876 +       dma_addr_t qm_sg_dma;
12877 +       struct scatterlist tmp[2];
12878 +       struct scatterlist *dst;
12879 +       struct caam_drv_req drv_req;
12880 +       struct qm_sg_entry sgt[0];
12881 +};
12882 +
12883 +/*
12884 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
12885 + * @src_nents: number of segments in input scatterlist
12886 + * @dst_nents: number of segments in output scatterlist
12887 + * @iv_dma: dma address of iv for checking continuity and link table
12888 + * @qm_sg_bytes: length of dma mapped h/w link table
12889 + * @qm_sg_dma: bus physical mapped address of h/w link table
12890 + * @drv_req: driver-specific request structure
12891 + * @sgt: the h/w link table
12892 + */
12893 +struct ablkcipher_edesc {
12894 +       int src_nents;
12895 +       int dst_nents;
12896 +       dma_addr_t iv_dma;
12897 +       int qm_sg_bytes;
12898 +       dma_addr_t qm_sg_dma;
12899 +       struct caam_drv_req drv_req;
12900 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
12901 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
12902 +        sizeof(struct qm_sg_entry))
12903 +       struct qm_sg_entry sgt[0];
12904 +};
12905 +
12906 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
12907 +                                       enum optype type)
12908 +{
12909 +       /*
12910 +        * This function is called on the fast path with values of 'type'
12911 +        * known at compile time. Invalid arguments are not expected and
12912 +        * thus no checks are made.
12913 +        */
12914 +       struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
12915 +       u32 *desc;
12916 +
12917 +       if (unlikely(!drv_ctx)) {
12918 +               spin_lock(&ctx->lock);
12919 +
12920 +               /* Read again to check if some other core initialized drv_ctx */
12921 +               drv_ctx = ctx->drv_ctx[type];
12922 +               if (!drv_ctx) {
12923 +                       int cpu;
12924 +
12925 +                       if (type == ENCRYPT)
12926 +                               desc = ctx->sh_desc_enc;
12927 +                       else if (type == DECRYPT)
12928 +                               desc = ctx->sh_desc_dec;
12929 +                       else /* (type == GIVENCRYPT) */
12930 +                               desc = ctx->sh_desc_givenc;
12931 +
12932 +                       cpu = smp_processor_id();
12933 +                       drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
12934 +                       if (likely(!IS_ERR_OR_NULL(drv_ctx)))
12935 +                               drv_ctx->op_type = type;
12936 +
12937 +                       ctx->drv_ctx[type] = drv_ctx;
12938 +               }
12939 +
12940 +               spin_unlock(&ctx->lock);
12941 +       }
12942 +
12943 +       return drv_ctx;
12944 +}
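get_drv_ctx() is a double-checked lazy init: the unlocked read keeps the fast path lock-free, and only the re-read under ctx->lock decides whether this CPU creates the per-operation context. The same shape reduced to a skeleton (generic sketch, not a drop-in):

	obj = cache[slot];
	if (unlikely(!obj)) {
		spin_lock(&lock);
		obj = cache[slot];	/* re-check under the lock */
		if (!obj) {
			obj = create();	/* only one CPU reaches this */
			cache[slot] = obj;
		}
		spin_unlock(&lock);
	}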
12945 +
12946 +static void caam_unmap(struct device *dev, struct scatterlist *src,
12947 +                      struct scatterlist *dst, int src_nents,
12948 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
12949 +                      enum optype op_type, dma_addr_t qm_sg_dma,
12950 +                      int qm_sg_bytes)
12951 +{
12952 +       if (dst != src) {
12953 +               if (src_nents)
12954 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
12955 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
12956 +       } else {
12957 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
12958 +       }
12959 +
12960 +       if (iv_dma)
12961 +               dma_unmap_single(dev, iv_dma, ivsize,
12962 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
12963 +                                                        DMA_TO_DEVICE);
12964 +       if (qm_sg_bytes)
12965 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
12966 +}
12967 +
12968 +static void aead_unmap(struct device *dev,
12969 +                      struct aead_edesc *edesc,
12970 +                      struct aead_request *req)
12971 +{
12972 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
12973 +       int ivsize = crypto_aead_ivsize(aead);
12974 +
12975 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12976 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12977 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
12978 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
12979 +}
12980 +
12981 +static void tls_unmap(struct device *dev,
12982 +                     struct tls_edesc *edesc,
12983 +                     struct aead_request *req)
12984 +{
12985 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
12986 +       int ivsize = crypto_aead_ivsize(aead);
12987 +
12988 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
12989 +                  edesc->dst_nents, edesc->iv_dma, ivsize,
12990 +                  edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
12991 +                  edesc->qm_sg_bytes);
12992 +}
12993 +
12994 +static void ablkcipher_unmap(struct device *dev,
12995 +                            struct ablkcipher_edesc *edesc,
12996 +                            struct ablkcipher_request *req)
12997 +{
12998 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
12999 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13000 +
13001 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13002 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13003 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
13004 +}
13005 +
13006 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13007 +{
13008 +       struct device *qidev;
13009 +       struct aead_edesc *edesc;
13010 +       struct aead_request *aead_req = drv_req->app_ctx;
13011 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13012 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13013 +       int ecode = 0;
13014 +
13015 +       qidev = caam_ctx->qidev;
13016 +
13017 +       if (unlikely(status)) {
13018 +               caam_jr_strstatus(qidev, status);
13019 +               ecode = -EIO;
13020 +       }
13021 +
13022 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13023 +       aead_unmap(qidev, edesc, aead_req);
13024 +
13025 +       aead_request_complete(aead_req, ecode);
13026 +       qi_cache_free(edesc);
13027 +}
13028 +
13029 +/*
13030 + * allocate and map the aead extended descriptor
13031 + */
13032 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13033 +                                          bool encrypt)
13034 +{
13035 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13036 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13037 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13038 +                                                typeof(*alg), aead);
13039 +       struct device *qidev = ctx->qidev;
13040 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13041 +                      GFP_KERNEL : GFP_ATOMIC;
13042 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13043 +       struct aead_edesc *edesc;
13044 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13045 +       int ivsize = 0;
13046 +       unsigned int authsize = ctx->authsize;
13047 +       int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13048 +       int in_len, out_len;
13049 +       struct qm_sg_entry *sg_table, *fd_sgt;
13050 +       struct caam_drv_ctx *drv_ctx;
13051 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13052 +
13053 +       drv_ctx = get_drv_ctx(ctx, op_type);
13054 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13055 +               return (struct aead_edesc *)drv_ctx;
13056 +
13057 +       /* allocate space for base edesc and hw desc commands, link tables */
13058 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13059 +       if (unlikely(!edesc)) {
13060 +               dev_err(qidev, "could not allocate extended descriptor\n");
13061 +               return ERR_PTR(-ENOMEM);
13062 +       }
13063 +
13064 +       if (likely(req->src == req->dst)) {
13065 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13066 +                                            req->cryptlen +
13067 +                                            (encrypt ? authsize : 0));
13068 +               if (unlikely(src_nents < 0)) {
13069 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13070 +                               req->assoclen + req->cryptlen +
13071 +                               (encrypt ? authsize : 0));
13072 +                       qi_cache_free(edesc);
13073 +                       return ERR_PTR(src_nents);
13074 +               }
13075 +
13076 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13077 +                                             DMA_BIDIRECTIONAL);
13078 +               if (unlikely(!mapped_src_nents)) {
13079 +                       dev_err(qidev, "unable to map source\n");
13080 +                       qi_cache_free(edesc);
13081 +                       return ERR_PTR(-ENOMEM);
13082 +               }
13083 +       } else {
13084 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13085 +                                            req->cryptlen);
13086 +               if (unlikely(src_nents < 0)) {
13087 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13088 +                               req->assoclen + req->cryptlen);
13089 +                       qi_cache_free(edesc);
13090 +                       return ERR_PTR(src_nents);
13091 +               }
13092 +
13093 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13094 +                                            req->cryptlen +
13095 +                                            (encrypt ? authsize :
13096 +                                                       (-authsize)));
13097 +               if (unlikely(dst_nents < 0)) {
13098 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13099 +                               req->assoclen + req->cryptlen +
13100 +                               (encrypt ? authsize : (-authsize)));
13101 +                       qi_cache_free(edesc);
13102 +                       return ERR_PTR(dst_nents);
13103 +               }
13104 +
13105 +               if (src_nents) {
13106 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13107 +                                                     src_nents, DMA_TO_DEVICE);
13108 +                       if (unlikely(!mapped_src_nents)) {
13109 +                               dev_err(qidev, "unable to map source\n");
13110 +                               qi_cache_free(edesc);
13111 +                               return ERR_PTR(-ENOMEM);
13112 +                       }
13113 +               } else {
13114 +                       mapped_src_nents = 0;
13115 +               }
13116 +
13117 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13118 +                                             DMA_FROM_DEVICE);
13119 +               if (unlikely(!mapped_dst_nents)) {
13120 +                       dev_err(qidev, "unable to map destination\n");
13121 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13122 +                       qi_cache_free(edesc);
13123 +                       return ERR_PTR(-ENOMEM);
13124 +               }
13125 +       }
13126 +
13127 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13128 +               ivsize = crypto_aead_ivsize(aead);
13129 +               iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13130 +               if (dma_mapping_error(qidev, iv_dma)) {
13131 +                       dev_err(qidev, "unable to map IV\n");
13132 +                       caam_unmap(qidev, req->src, req->dst, src_nents,
13133 +                                  dst_nents, 0, 0, op_type, 0, 0);
13134 +                       qi_cache_free(edesc);
13135 +                       return ERR_PTR(-ENOMEM);
13136 +               }
13137 +       }
13138 +
13139 +       /*
13140 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13141 +        * Input is not contiguous.
13142 +        */
13143 +       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13144 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13145 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13146 +               dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
13147 +                       qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13148 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13149 +                          iv_dma, ivsize, op_type, 0, 0);
13150 +               qi_cache_free(edesc);
13151 +               return ERR_PTR(-ENOMEM);
13152 +       }
13153 +       sg_table = &edesc->sgt[0];
13154 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13155 +
13156 +       edesc->src_nents = src_nents;
13157 +       edesc->dst_nents = dst_nents;
13158 +       edesc->iv_dma = iv_dma;
13159 +       edesc->drv_req.app_ctx = req;
13160 +       edesc->drv_req.cbk = aead_done;
13161 +       edesc->drv_req.drv_ctx = drv_ctx;
13162 +
13163 +       edesc->assoclen = cpu_to_caam32(req->assoclen);
13164 +       edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13165 +                                            DMA_TO_DEVICE);
13166 +       if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13167 +               dev_err(qidev, "unable to map assoclen\n");
13168 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13169 +                          iv_dma, ivsize, op_type, 0, 0);
13170 +               qi_cache_free(edesc);
13171 +               return ERR_PTR(-ENOMEM);
13172 +       }
13173 +
13174 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13175 +       qm_sg_index++;
13176 +       if (ivsize) {
13177 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13178 +               qm_sg_index++;
13179 +       }
13180 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13181 +       qm_sg_index += mapped_src_nents;
13182 +
13183 +       if (mapped_dst_nents > 1)
13184 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13185 +                                qm_sg_index, 0);
13186 +
13187 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13188 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13189 +               dev_err(qidev, "unable to map S/G table\n");
13190 +               dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13191 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13192 +                          iv_dma, ivsize, op_type, 0, 0);
13193 +               qi_cache_free(edesc);
13194 +               return ERR_PTR(-ENOMEM);
13195 +       }
13196 +
13197 +       edesc->qm_sg_dma = qm_sg_dma;
13198 +       edesc->qm_sg_bytes = qm_sg_bytes;
13199 +
13200 +       out_len = req->assoclen + req->cryptlen +
13201 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
13202 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13203 +
13204 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13205 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13206 +
13207 +       if (req->dst == req->src) {
13208 +               if (mapped_src_nents == 1)
13209 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13210 +                                        out_len, 0);
13211 +               else
13212 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13213 +                                            (1 + !!ivsize) * sizeof(*sg_table),
13214 +                                            out_len, 0);
13215 +       } else if (mapped_dst_nents == 1) {
13216 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13217 +                                0);
13218 +       } else {
13219 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13220 +                                    qm_sg_index, out_len, 0);
13221 +       }
13222 +
13223 +       return edesc;
13224 +}
13225 +
13226 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13227 +{
13228 +       struct aead_edesc *edesc;
13229 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13230 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13231 +       int ret;
13232 +
13233 +       if (unlikely(caam_congested))
13234 +               return -EAGAIN;
13235 +
13236 +       /* allocate extended descriptor */
13237 +       edesc = aead_edesc_alloc(req, encrypt);
13238 +       if (IS_ERR_OR_NULL(edesc))
13239 +               return PTR_ERR(edesc);
13240 +
13241 +       /* Create and submit job descriptor */
13242 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13243 +       if (!ret) {
13244 +               ret = -EINPROGRESS;
13245 +       } else {
13246 +               aead_unmap(ctx->qidev, edesc, req);
13247 +               qi_cache_free(edesc);
13248 +       }
13249 +
13250 +       return ret;
13251 +}
13252 +
13253 +static int aead_encrypt(struct aead_request *req)
13254 +{
13255 +       return aead_crypt(req, true);
13256 +}
13257 +
13258 +static int aead_decrypt(struct aead_request *req)
13259 +{
13260 +       return aead_crypt(req, false);
13261 +}
13262 +
13263 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13264 +{
13265 +       struct device *qidev;
13266 +       struct tls_edesc *edesc;
13267 +       struct aead_request *aead_req = drv_req->app_ctx;
13268 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13269 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13270 +       int ecode = 0;
13271 +
13272 +       qidev = caam_ctx->qidev;
13273 +
13274 +       if (unlikely(status)) {
13275 +               caam_jr_strstatus(qidev, status);
13276 +               ecode = -EIO;
13277 +       }
13278 +
13279 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13280 +       tls_unmap(qidev, edesc, aead_req);
13281 +
13282 +       aead_request_complete(aead_req, ecode);
13283 +       qi_cache_free(edesc);
13284 +}
13285 +
13286 +/*
13287 + * allocate and map the tls extended descriptor
13288 + */
13289 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13290 +{
13291 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13292 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13293 +       unsigned int blocksize = crypto_aead_blocksize(aead);
13294 +       unsigned int padsize, authsize;
13295 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13296 +                                                typeof(*alg), aead);
13297 +       struct device *qidev = ctx->qidev;
13298 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13299 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13300 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13301 +       struct tls_edesc *edesc;
13302 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13303 +       int ivsize = 0;
13304 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13305 +       int in_len, out_len;
13306 +       struct qm_sg_entry *sg_table, *fd_sgt;
13307 +       struct caam_drv_ctx *drv_ctx;
13308 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13309 +       struct scatterlist *dst;
13310 +
13311 +       if (encrypt) {
13312 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13313 +                                       blocksize);
13314 +               authsize = ctx->authsize + padsize;
13315 +       } else {
13316 +               authsize = ctx->authsize;
13317 +       }
13318 +
13319 +       drv_ctx = get_drv_ctx(ctx, op_type);
13320 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13321 +               return (struct tls_edesc *)drv_ctx;
13322 +
13323 +       /* allocate space for base edesc and hw desc commands, link tables */
13324 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13325 +       if (unlikely(!edesc)) {
13326 +               dev_err(qidev, "could not allocate extended descriptor\n");
13327 +               return ERR_PTR(-ENOMEM);
13328 +       }
13329 +
13330 +       if (likely(req->src == req->dst)) {
13331 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13332 +                                            req->cryptlen +
13333 +                                            (encrypt ? authsize : 0));
13334 +               if (unlikely(src_nents < 0)) {
13335 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13336 +                               req->assoclen + req->cryptlen +
13337 +                               (encrypt ? authsize : 0));
13338 +                       qi_cache_free(edesc);
13339 +                       return ERR_PTR(src_nents);
13340 +               }
13341 +
13342 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13343 +                                             DMA_BIDIRECTIONAL);
13344 +               if (unlikely(!mapped_src_nents)) {
13345 +                       dev_err(qidev, "unable to map source\n");
13346 +                       qi_cache_free(edesc);
13347 +                       return ERR_PTR(-ENOMEM);
13348 +               }
13349 +               dst = req->dst;
13350 +       } else {
13351 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13352 +                                            req->cryptlen);
13353 +               if (unlikely(src_nents < 0)) {
13354 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13355 +                               req->assoclen + req->cryptlen);
13356 +                       qi_cache_free(edesc);
13357 +                       return ERR_PTR(src_nents);
13358 +               }
13359 +
13360 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13361 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
13362 +                                            (encrypt ? authsize : 0));
13363 +               if (unlikely(dst_nents < 0)) {
13364 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13365 +                               req->cryptlen +
13366 +                               (encrypt ? authsize : 0));
13367 +                       qi_cache_free(edesc);
13368 +                       return ERR_PTR(dst_nents);
13369 +               }
13370 +
13371 +               if (src_nents) {
13372 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13373 +                                                     src_nents, DMA_TO_DEVICE);
13374 +                       if (unlikely(!mapped_src_nents)) {
13375 +                               dev_err(qidev, "unable to map source\n");
13376 +                               qi_cache_free(edesc);
13377 +                               return ERR_PTR(-ENOMEM);
13378 +                       }
13379 +               } else {
13380 +                       mapped_src_nents = 0;
13381 +               }
13382 +
13383 +               mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13384 +                                             DMA_FROM_DEVICE);
13385 +               if (unlikely(!mapped_dst_nents)) {
13386 +                       dev_err(qidev, "unable to map destination\n");
13387 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13388 +                       qi_cache_free(edesc);
13389 +                       return ERR_PTR(-ENOMEM);
13390 +               }
13391 +       }
13392 +
13393 +       ivsize = crypto_aead_ivsize(aead);
13394 +       iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13395 +       if (dma_mapping_error(qidev, iv_dma)) {
13396 +               dev_err(qidev, "unable to map IV\n");
13397 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13398 +                          op_type, 0, 0);
13399 +               qi_cache_free(edesc);
13400 +               return ERR_PTR(-ENOMEM);
13401 +       }
13402 +
13403 +       /*
13404 +        * Create S/G table: IV, src, dst.
13405 +        * Input is not contiguous.
13406 +        */
13407 +       qm_sg_ents = 1 + mapped_src_nents +
13408 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13409 +       sg_table = &edesc->sgt[0];
13410 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13411 +
13412 +       edesc->src_nents = src_nents;
13413 +       edesc->dst_nents = dst_nents;
13414 +       edesc->dst = dst;
13415 +       edesc->iv_dma = iv_dma;
13416 +       edesc->drv_req.app_ctx = req;
13417 +       edesc->drv_req.cbk = tls_done;
13418 +       edesc->drv_req.drv_ctx = drv_ctx;
13419 +
13420 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13421 +       qm_sg_index = 1;
13422 +
13423 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13424 +       qm_sg_index += mapped_src_nents;
13425 +
13426 +       if (mapped_dst_nents > 1)
13427 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13428 +                                qm_sg_index, 0);
13429 +
13430 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13431 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13432 +               dev_err(qidev, "unable to map S/G table\n");
13433 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13434 +                          ivsize, op_type, 0, 0);
13435 +               qi_cache_free(edesc);
13436 +               return ERR_PTR(-ENOMEM);
13437 +       }
13438 +
13439 +       edesc->qm_sg_dma = qm_sg_dma;
13440 +       edesc->qm_sg_bytes = qm_sg_bytes;
13441 +
13442 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
13443 +       in_len = ivsize + req->assoclen + req->cryptlen;
13444 +
13445 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13446 +
13447 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13448 +
13449 +       if (req->dst == req->src)
13450 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13451 +                                   (sg_nents_for_len(req->src, req->assoclen) +
13452 +                                    1) * sizeof(*sg_table), out_len, 0);
13453 +       else if (mapped_dst_nents == 1)
13454 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13455 +       else
13456 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13457 +                                    qm_sg_index, out_len, 0);
13458 +
13459 +       return edesc;
13460 +}
13461 +
13462 +static int tls_crypt(struct aead_request *req, bool encrypt)
13463 +{
13464 +       struct tls_edesc *edesc;
13465 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13466 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13467 +       int ret;
13468 +
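+       /* Back off when QMan signals congestion; callers may retry. */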
13469 +       if (unlikely(caam_congested))
13470 +               return -EAGAIN;
13471 +
13472 +       edesc = tls_edesc_alloc(req, encrypt);
13473 +       if (IS_ERR_OR_NULL(edesc))
13474 +               return edesc ? PTR_ERR(edesc) : -ENOMEM;
13475 +
13476 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13477 +       if (!ret) {
13478 +               ret = -EINPROGRESS;
13479 +       } else {
13480 +               tls_unmap(ctx->qidev, edesc, req);
13481 +               qi_cache_free(edesc);
13482 +       }
13483 +
13484 +       return ret;
13485 +}
13486 +
13487 +static int tls_encrypt(struct aead_request *req)
13488 +{
13489 +       return tls_crypt(req, true);
13490 +}
13491 +
13492 +static int tls_decrypt(struct aead_request *req)
13493 +{
13494 +       return tls_crypt(req, false);
13495 +}
13496 +
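For orientation between the request-path helpers above and the algorithm tables below, a minimal sketch of how a kernel caller could drive this TLS AEAD through the generic crypto API. It is illustrative only: the algorithm name "tls10(hmac(sha1),cbc(aes))" refers to the template this patch registers, the key blob is assumed to be in the authenc-style format its setkey expects, and the tls_demo_* identifiers are local to the sketch, not part of the driver.

/*
 * Hedged example caller, not driver code. Assumes the tls10 AEAD
 * template registered by this patch and an authenc-style key blob.
 */
#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct tls_demo_result {
	struct completion done;
	int err;
};

static void tls_demo_cb(struct crypto_async_request *areq, int err)
{
	struct tls_demo_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged; wait for the final call */
	res->err = err;
	complete(&res->done);
}

static int tls_demo_encrypt(struct scatterlist *sg, unsigned int assoclen,
			    unsigned int cryptlen, const u8 *key,
			    unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct tls_demo_result res;
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.done);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_demo_cb, &res);
	/*
	 * In place (src == dst); on encrypt the sg must have tailroom for
	 * the MAC (out_len in tls_edesc_alloc(): cryptlen + authsize).
	 */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}
	/* tls_crypt() also returns -EAGAIN under QI congestion; retry. */

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}
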
13497 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13498 +{
13499 +       struct ablkcipher_edesc *edesc;
13500 +       struct ablkcipher_request *req = drv_req->app_ctx;
13501 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13502 +       struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13503 +       struct device *qidev = caam_ctx->qidev;
13504 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13505 +
13506 +#ifdef DEBUG
13507 +       dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13508 +#endif
13509 +
13510 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13511 +
13512 +       if (status)
13513 +               caam_jr_strstatus(qidev, status);
13514 +
13515 +#ifdef DEBUG
13516 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
13517 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13518 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
13519 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
13520 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13521 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13522 +#endif
13523 +
13524 +       ablkcipher_unmap(qidev, edesc, req);
13525 +       qi_cache_free(edesc);
13526 +
13527 +       /*
13528 +        * The crypto API expects us to set the IV (req->info) to the last
13529 +        * ciphertext block. This is used e.g. by the CTS mode.
13530 +        */
13531 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13532 +                                ivsize, 0);
13533 +
13534 +       ablkcipher_request_complete(req, status);
13535 +}
13536 +
13537 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13538 +                                                      *req, bool encrypt)
13539 +{
13540 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13541 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13542 +       struct device *qidev = ctx->qidev;
13543 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13544 +                      GFP_KERNEL : GFP_ATOMIC;
13545 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13546 +       struct ablkcipher_edesc *edesc;
13547 +       dma_addr_t iv_dma;
13548 +       bool in_contig;
13549 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13550 +       int dst_sg_idx, qm_sg_ents;
13551 +       struct qm_sg_entry *sg_table, *fd_sgt;
13552 +       struct caam_drv_ctx *drv_ctx;
13553 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13554 +
13555 +       drv_ctx = get_drv_ctx(ctx, op_type);
13556 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13557 +               return (struct ablkcipher_edesc *)drv_ctx;
13558 +
13559 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13560 +       if (unlikely(src_nents < 0)) {
13561 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13562 +                       req->nbytes);
13563 +               return ERR_PTR(src_nents);
13564 +       }
13565 +
13566 +       if (unlikely(req->src != req->dst)) {
13567 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13568 +               if (unlikely(dst_nents < 0)) {
13569 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13570 +                               req->nbytes);
13571 +                       return ERR_PTR(dst_nents);
13572 +               }
13573 +
13574 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13575 +                                             DMA_TO_DEVICE);
13576 +               if (unlikely(!mapped_src_nents)) {
13577 +                       dev_err(qidev, "unable to map source\n");
13578 +                       return ERR_PTR(-ENOMEM);
13579 +               }
13580 +
13581 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13582 +                                             DMA_FROM_DEVICE);
13583 +               if (unlikely(!mapped_dst_nents)) {
13584 +                       dev_err(qidev, "unable to map destination\n");
13585 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13586 +                       return ERR_PTR(-ENOMEM);
13587 +               }
13588 +       } else {
13589 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13590 +                                             DMA_BIDIRECTIONAL);
13591 +               if (unlikely(!mapped_src_nents)) {
13592 +                       dev_err(qidev, "unable to map source\n");
13593 +                       return ERR_PTR(-ENOMEM);
13594 +               }
13595 +       }
13596 +
13597 +       iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13598 +       if (dma_mapping_error(qidev, iv_dma)) {
13599 +               dev_err(qidev, "unable to map IV\n");
13600 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13601 +                          0, 0, 0, 0);
13602 +               return ERR_PTR(-ENOMEM);
13603 +       }
13604 +
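+       /*
+        * If the IV and a single mapped src segment are adjacent in DMA
+        * space, the input can be referenced directly from the FD and no
+        * input S/G entries are needed.
+        */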
13605 +       if (mapped_src_nents == 1 &&
13606 +           iv_dma + ivsize == sg_dma_address(req->src)) {
13607 +               in_contig = true;
13608 +               qm_sg_ents = 0;
13609 +       } else {
13610 +               in_contig = false;
13611 +               qm_sg_ents = 1 + mapped_src_nents;
13612 +       }
13613 +       dst_sg_idx = qm_sg_ents;
13614 +
13615 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
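+       /* The edesc and its S/G table must fit the qi_cache entry they share. */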
13616 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13617 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13618 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13619 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13620 +                          iv_dma, ivsize, op_type, 0, 0);
13621 +               return ERR_PTR(-ENOMEM);
13622 +       }
13623 +
13624 +       /* allocate space for base edesc and link tables */
13625 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13626 +       if (unlikely(!edesc)) {
13627 +               dev_err(qidev, "could not allocate extended descriptor\n");
13628 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13629 +                          iv_dma, ivsize, op_type, 0, 0);
13630 +               return ERR_PTR(-ENOMEM);
13631 +       }
13632 +
13633 +       edesc->src_nents = src_nents;
13634 +       edesc->dst_nents = dst_nents;
13635 +       edesc->iv_dma = iv_dma;
13636 +       sg_table = &edesc->sgt[0];
13637 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13638 +       edesc->drv_req.app_ctx = req;
13639 +       edesc->drv_req.cbk = ablkcipher_done;
13640 +       edesc->drv_req.drv_ctx = drv_ctx;
13641 +
13642 +       if (!in_contig) {
13643 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13644 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13645 +       }
13646 +
13647 +       if (mapped_dst_nents > 1)
13648 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13649 +                                dst_sg_idx, 0);
13650 +
13651 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13652 +                                         DMA_TO_DEVICE);
13653 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13654 +               dev_err(qidev, "unable to map S/G table\n");
13655 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13656 +                          iv_dma, ivsize, op_type, 0, 0);
13657 +               qi_cache_free(edesc);
13658 +               return ERR_PTR(-ENOMEM);
13659 +       }
13660 +
13661 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13662 +
13663 +       if (!in_contig)
13664 +               dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13665 +                                         ivsize + req->nbytes, 0);
13666 +       else
13667 +               dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13668 +                                     0);
13669 +
13670 +       if (req->src == req->dst) {
13671 +               if (!in_contig)
13672 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13673 +                                            sizeof(*sg_table), req->nbytes, 0);
13674 +               else
13675 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13676 +                                        req->nbytes, 0);
13677 +       } else if (mapped_dst_nents > 1) {
13678 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13679 +                                    sizeof(*sg_table), req->nbytes, 0);
13680 +       } else {
13681 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13682 +                                req->nbytes, 0);
13683 +       }
13684 +
13685 +       return edesc;
13686 +}
13687 +
13688 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13689 +       struct skcipher_givcrypt_request *creq)
13690 +{
13691 +       struct ablkcipher_request *req = &creq->creq;
13692 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13693 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13694 +       struct device *qidev = ctx->qidev;
13695 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13696 +                      GFP_KERNEL : GFP_ATOMIC;
13697 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13698 +       struct ablkcipher_edesc *edesc;
13699 +       dma_addr_t iv_dma;
13700 +       bool out_contig;
13701 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13702 +       struct qm_sg_entry *sg_table, *fd_sgt;
13703 +       int dst_sg_idx, qm_sg_ents;
13704 +       struct caam_drv_ctx *drv_ctx;
13705 +
13706 +       drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13707 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13708 +               return (struct ablkcipher_edesc *)drv_ctx;
13709 +
13710 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13711 +       if (unlikely(src_nents < 0)) {
13712 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13713 +                       req->nbytes);
13714 +               return ERR_PTR(src_nents);
13715 +       }
13716 +
13717 +       if (unlikely(req->src != req->dst)) {
13718 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13719 +               if (unlikely(dst_nents < 0)) {
13720 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13721 +                               req->nbytes);
13722 +                       return ERR_PTR(dst_nents);
13723 +               }
13724 +
13725 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13726 +                                             DMA_TO_DEVICE);
13727 +               if (unlikely(!mapped_src_nents)) {
13728 +                       dev_err(qidev, "unable to map source\n");
13729 +                       return ERR_PTR(-ENOMEM);
13730 +               }
13731 +
13732 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13733 +                                             DMA_FROM_DEVICE);
13734 +               if (unlikely(!mapped_dst_nents)) {
13735 +                       dev_err(qidev, "unable to map destination\n");
13736 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13737 +                       return ERR_PTR(-ENOMEM);
13738 +               }
13739 +       } else {
13740 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13741 +                                             DMA_BIDIRECTIONAL);
13742 +               if (unlikely(!mapped_src_nents)) {
13743 +                       dev_err(qidev, "unable to map source\n");
13744 +                       return ERR_PTR(-ENOMEM);
13745 +               }
13746 +
13747 +               dst_nents = src_nents;
13748 +               mapped_dst_nents = src_nents;
13749 +       }
13750 +
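+       /*
+        * Unlike encrypt/decrypt, the IV is an output of givencrypt: the
+        * SEC generates it and writes it to creq->giv, hence the
+        * DMA_FROM_DEVICE mapping here and the ivsize + req->nbytes
+        * output frame below.
+        */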
13751 +       iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13752 +       if (dma_mapping_error(qidev, iv_dma)) {
13753 +               dev_err(qidev, "unable to map IV\n");
13754 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13755 +                          0, 0, 0, 0);
13756 +               return ERR_PTR(-ENOMEM);
13757 +       }
13758 +
13759 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13760 +       dst_sg_idx = qm_sg_ents;
13761 +       if (mapped_dst_nents == 1 &&
13762 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
13763 +               out_contig = true;
13764 +       } else {
13765 +               out_contig = false;
13766 +               qm_sg_ents += 1 + mapped_dst_nents;
13767 +       }
13768 +
13769 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13770 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13771 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13772 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13773 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13774 +               return ERR_PTR(-ENOMEM);
13775 +       }
13776 +
13777 +       /* allocate space for base edesc and link tables */
13778 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13779 +       if (!edesc) {
13780 +               dev_err(qidev, "could not allocate extended descriptor\n");
13781 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13782 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13783 +               return ERR_PTR(-ENOMEM);
13784 +       }
13785 +
13786 +       edesc->src_nents = src_nents;
13787 +       edesc->dst_nents = dst_nents;
13788 +       edesc->iv_dma = iv_dma;
13789 +       sg_table = &edesc->sgt[0];
13790 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13791 +       edesc->drv_req.app_ctx = req;
13792 +       edesc->drv_req.cbk = ablkcipher_done;
13793 +       edesc->drv_req.drv_ctx = drv_ctx;
13794 +
13795 +       if (mapped_src_nents > 1)
13796 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13797 +
13798 +       if (!out_contig) {
13799 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13800 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13801 +                                dst_sg_idx + 1, 0);
13802 +       }
13803 +
13804 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13805 +                                         DMA_TO_DEVICE);
13806 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13807 +               dev_err(qidev, "unable to map S/G table\n");
13808 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13809 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13810 +               qi_cache_free(edesc);
13811 +               return ERR_PTR(-ENOMEM);
13812 +       }
13813 +
13814 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13815 +
13816 +       if (mapped_src_nents > 1)
13817 +               dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13818 +                                    0);
13819 +       else
13820 +               dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13821 +                                req->nbytes, 0);
13822 +
13823 +       if (!out_contig)
13824 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13825 +                                    sizeof(*sg_table), ivsize + req->nbytes,
13826 +                                    0);
13827 +       else
13828 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13829 +                                ivsize + req->nbytes, 0);
13830 +
13831 +       return edesc;
13832 +}
13833 +
13834 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13835 +{
13836 +       struct ablkcipher_edesc *edesc;
13837 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13838 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13839 +       int ret;
13840 +
13841 +       if (unlikely(caam_congested))
13842 +               return -EAGAIN;
13843 +
13844 +       /* allocate extended descriptor */
13845 +       edesc = ablkcipher_edesc_alloc(req, encrypt);
13846 +       if (IS_ERR(edesc))
13847 +               return PTR_ERR(edesc);
13848 +
13849 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13850 +       if (!ret) {
13851 +               ret = -EINPROGRESS;
13852 +       } else {
13853 +               ablkcipher_unmap(ctx->qidev, edesc, req);
13854 +               qi_cache_free(edesc);
13855 +       }
13856 +
13857 +       return ret;
13858 +}
13859 +
13860 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
13861 +{
13862 +       return ablkcipher_crypt(req, true);
13863 +}
13864 +
13865 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
13866 +{
13867 +       return ablkcipher_crypt(req, false);
13868 +}
13869 +
13870 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
13871 +{
13872 +       struct ablkcipher_request *req = &creq->creq;
13873 +       struct ablkcipher_edesc *edesc;
13874 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13875 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13876 +       int ret;
13877 +
13878 +       if (unlikely(caam_congested))
13879 +               return -EAGAIN;
13880 +
13881 +       /* allocate extended descriptor */
13882 +       edesc = ablkcipher_giv_edesc_alloc(creq);
13883 +       if (IS_ERR(edesc))
13884 +               return PTR_ERR(edesc);
13885 +
13886 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13887 +       if (!ret) {
13888 +               ret = -EINPROGRESS;
13889 +       } else {
13890 +               ablkcipher_unmap(ctx->qidev, edesc, req);
13891 +               qi_cache_free(edesc);
13892 +       }
13893 +
13894 +       return ret;
13895 +}
13896 +
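Similarly, a hedged sketch of a caller for the ablkcipher templates below, using the legacy 4.9-era ablkcipher API and the same completion pattern as the TLS sketch above. The ablk_demo_* names are local to the sketch; whether the request is actually serviced by, e.g., cbc-aes-caam-qi depends on the relative priorities of the registered implementations.

/* Hedged example caller, not driver code. */
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct ablk_demo_result {
	struct completion done;
	int err;
};

static void ablk_demo_cb(struct crypto_async_request *areq, int err)
{
	struct ablk_demo_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->done);
}

static int ablk_demo_cbc_aes(const u8 *key, unsigned int keylen, u8 *iv,
			     struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct ablk_demo_result res;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					ablk_demo_cb, &res);
	/* in place (src == dst), mapped bidirectionally by the driver */
	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}
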
13897 +#define template_ablkcipher    template_u.ablkcipher
13898 +struct caam_alg_template {
13899 +       char name[CRYPTO_MAX_ALG_NAME];
13900 +       char driver_name[CRYPTO_MAX_ALG_NAME];
13901 +       unsigned int blocksize;
13902 +       u32 type;
13903 +       union {
13904 +               struct ablkcipher_alg ablkcipher;
13905 +       } template_u;
13906 +       u32 class1_alg_type;
13907 +       u32 class2_alg_type;
13908 +};
13909 +
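+/*
+ * Entries typed CRYPTO_ALG_TYPE_GIVCIPHER provide their own givencrypt
+ * (IV produced by the SEC); ctr(aes) and xts(aes) stay plain
+ * CRYPTO_ALG_TYPE_ABLKCIPHER and name a generic IV generator instead.
+ */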
13910 +static struct caam_alg_template driver_algs[] = {
13911 +       /* ablkcipher descriptor */
13912 +       {
13913 +               .name = "cbc(aes)",
13914 +               .driver_name = "cbc-aes-caam-qi",
13915 +               .blocksize = AES_BLOCK_SIZE,
13916 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13917 +               .template_ablkcipher = {
13918 +                       .setkey = ablkcipher_setkey,
13919 +                       .encrypt = ablkcipher_encrypt,
13920 +                       .decrypt = ablkcipher_decrypt,
13921 +                       .givencrypt = ablkcipher_givencrypt,
13922 +                       .geniv = "<built-in>",
13923 +                       .min_keysize = AES_MIN_KEY_SIZE,
13924 +                       .max_keysize = AES_MAX_KEY_SIZE,
13925 +                       .ivsize = AES_BLOCK_SIZE,
13926 +               },
13927 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13928 +       },
13929 +       {
13930 +               .name = "cbc(des3_ede)",
13931 +               .driver_name = "cbc-3des-caam-qi",
13932 +               .blocksize = DES3_EDE_BLOCK_SIZE,
13933 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13934 +               .template_ablkcipher = {
13935 +                       .setkey = ablkcipher_setkey,
13936 +                       .encrypt = ablkcipher_encrypt,
13937 +                       .decrypt = ablkcipher_decrypt,
13938 +                       .givencrypt = ablkcipher_givencrypt,
13939 +                       .geniv = "<built-in>",
13940 +                       .min_keysize = DES3_EDE_KEY_SIZE,
13941 +                       .max_keysize = DES3_EDE_KEY_SIZE,
13942 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
13943 +               },
13944 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
13945 +       },
13946 +       {
13947 +               .name = "cbc(des)",
13948 +               .driver_name = "cbc-des-caam-qi",
13949 +               .blocksize = DES_BLOCK_SIZE,
13950 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13951 +               .template_ablkcipher = {
13952 +                       .setkey = ablkcipher_setkey,
13953 +                       .encrypt = ablkcipher_encrypt,
13954 +                       .decrypt = ablkcipher_decrypt,
13955 +                       .givencrypt = ablkcipher_givencrypt,
13956 +                       .geniv = "<built-in>",
13957 +                       .min_keysize = DES_KEY_SIZE,
13958 +                       .max_keysize = DES_KEY_SIZE,
13959 +                       .ivsize = DES_BLOCK_SIZE,
13960 +               },
13961 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
13962 +       },
13963 +       {
13964 +               .name = "ctr(aes)",
13965 +               .driver_name = "ctr-aes-caam-qi",
13966 +               .blocksize = 1,
13967 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13968 +               .template_ablkcipher = {
13969 +                       .setkey = ablkcipher_setkey,
13970 +                       .encrypt = ablkcipher_encrypt,
13971 +                       .decrypt = ablkcipher_decrypt,
13972 +                       .geniv = "chainiv",
13973 +                       .min_keysize = AES_MIN_KEY_SIZE,
13974 +                       .max_keysize = AES_MAX_KEY_SIZE,
13975 +                       .ivsize = AES_BLOCK_SIZE,
13976 +               },
13977 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13978 +       },
13979 +       {
13980 +               .name = "rfc3686(ctr(aes))",
13981 +               .driver_name = "rfc3686-ctr-aes-caam-qi",
13982 +               .blocksize = 1,
13983 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13984 +               .template_ablkcipher = {
13985 +                       .setkey = ablkcipher_setkey,
13986 +                       .encrypt = ablkcipher_encrypt,
13987 +                       .decrypt = ablkcipher_decrypt,
13988 +                       .givencrypt = ablkcipher_givencrypt,
13989 +                       .geniv = "<built-in>",
13990 +                       .min_keysize = AES_MIN_KEY_SIZE +
13991 +                                      CTR_RFC3686_NONCE_SIZE,
13992 +                       .max_keysize = AES_MAX_KEY_SIZE +
13993 +                                      CTR_RFC3686_NONCE_SIZE,
13994 +                       .ivsize = CTR_RFC3686_IV_SIZE,
13995 +               },
13996 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13997 +       },
13998 +       {
13999 +               .name = "xts(aes)",
14000 +               .driver_name = "xts-aes-caam-qi",
14001 +               .blocksize = AES_BLOCK_SIZE,
14002 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14003 +               .template_ablkcipher = {
14004 +                       .setkey = xts_ablkcipher_setkey,
14005 +                       .encrypt = ablkcipher_encrypt,
14006 +                       .decrypt = ablkcipher_decrypt,
14007 +                       .geniv = "eseqiv",
14008 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
14009 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
14010 +                       .ivsize = AES_BLOCK_SIZE,
14011 +               },
14012 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14013 +       },
14014 +};
14015 +
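+/*
+ * Plain authenc(...) entries take the IV from the caller; the
+ * echainiv(...) variants generate it themselves and set .geniv so the
+ * shared descriptors are built accordingly.
+ */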
14016 +static struct caam_aead_alg driver_aeads[] = {
14017 +       /* single-pass ipsec_esp descriptor */
14018 +       {
14019 +               .aead = {
14020 +                       .base = {
14021 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
14022 +                               .cra_driver_name = "authenc-hmac-md5-"
14023 +                                                  "cbc-aes-caam-qi",
14024 +                               .cra_blocksize = AES_BLOCK_SIZE,
14025 +                       },
14026 +                       .setkey = aead_setkey,
14027 +                       .setauthsize = aead_setauthsize,
14028 +                       .encrypt = aead_encrypt,
14029 +                       .decrypt = aead_decrypt,
14030 +                       .ivsize = AES_BLOCK_SIZE,
14031 +                       .maxauthsize = MD5_DIGEST_SIZE,
14032 +               },
14033 +               .caam = {
14034 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14035 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14036 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14037 +               }
14038 +       },
14039 +       {
14040 +               .aead = {
14041 +                       .base = {
14042 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14043 +                                           "cbc(aes)))",
14044 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14045 +                                                  "cbc-aes-caam-qi",
14046 +                               .cra_blocksize = AES_BLOCK_SIZE,
14047 +                       },
14048 +                       .setkey = aead_setkey,
14049 +                       .setauthsize = aead_setauthsize,
14050 +                       .encrypt = aead_encrypt,
14051 +                       .decrypt = aead_decrypt,
14052 +                       .ivsize = AES_BLOCK_SIZE,
14053 +                       .maxauthsize = MD5_DIGEST_SIZE,
14054 +               },
14055 +               .caam = {
14056 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14057 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14058 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14059 +                       .geniv = true,
14060 +               }
14061 +       },
14062 +       {
14063 +               .aead = {
14064 +                       .base = {
14065 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
14066 +                               .cra_driver_name = "authenc-hmac-sha1-"
14067 +                                                  "cbc-aes-caam-qi",
14068 +                               .cra_blocksize = AES_BLOCK_SIZE,
14069 +                       },
14070 +                       .setkey = aead_setkey,
14071 +                       .setauthsize = aead_setauthsize,
14072 +                       .encrypt = aead_encrypt,
14073 +                       .decrypt = aead_decrypt,
14074 +                       .ivsize = AES_BLOCK_SIZE,
14075 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14076 +               },
14077 +               .caam = {
14078 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14079 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14080 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14081 +               }
14082 +       },
14083 +       {
14084 +               .aead = {
14085 +                       .base = {
14086 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14087 +                                           "cbc(aes)))",
14088 +                               .cra_driver_name = "echainiv-authenc-"
14089 +                                                  "hmac-sha1-cbc-aes-caam-qi",
14090 +                               .cra_blocksize = AES_BLOCK_SIZE,
14091 +                       },
14092 +                       .setkey = aead_setkey,
14093 +                       .setauthsize = aead_setauthsize,
14094 +                       .encrypt = aead_encrypt,
14095 +                       .decrypt = aead_decrypt,
14096 +                       .ivsize = AES_BLOCK_SIZE,
14097 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14098 +               },
14099 +               .caam = {
14100 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14101 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14102 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14103 +                       .geniv = true,
14104 +               },
14105 +       },
14106 +       {
14107 +               .aead = {
14108 +                       .base = {
14109 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
14110 +                               .cra_driver_name = "authenc-hmac-sha224-"
14111 +                                                  "cbc-aes-caam-qi",
14112 +                               .cra_blocksize = AES_BLOCK_SIZE,
14113 +                       },
14114 +                       .setkey = aead_setkey,
14115 +                       .setauthsize = aead_setauthsize,
14116 +                       .encrypt = aead_encrypt,
14117 +                       .decrypt = aead_decrypt,
14118 +                       .ivsize = AES_BLOCK_SIZE,
14119 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14120 +               },
14121 +               .caam = {
14122 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14123 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14124 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14125 +               }
14126 +       },
14127 +       {
14128 +               .aead = {
14129 +                       .base = {
14130 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14131 +                                           "cbc(aes)))",
14132 +                               .cra_driver_name = "echainiv-authenc-"
14133 +                                                  "hmac-sha224-cbc-aes-caam-qi",
14134 +                               .cra_blocksize = AES_BLOCK_SIZE,
14135 +                       },
14136 +                       .setkey = aead_setkey,
14137 +                       .setauthsize = aead_setauthsize,
14138 +                       .encrypt = aead_encrypt,
14139 +                       .decrypt = aead_decrypt,
14140 +                       .ivsize = AES_BLOCK_SIZE,
14141 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14142 +               },
14143 +               .caam = {
14144 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14145 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14146 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14147 +                       .geniv = true,
14148 +               }
14149 +       },
14150 +       {
14151 +               .aead = {
14152 +                       .base = {
14153 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
14154 +                               .cra_driver_name = "authenc-hmac-sha256-"
14155 +                                                  "cbc-aes-caam-qi",
14156 +                               .cra_blocksize = AES_BLOCK_SIZE,
14157 +                       },
14158 +                       .setkey = aead_setkey,
14159 +                       .setauthsize = aead_setauthsize,
14160 +                       .encrypt = aead_encrypt,
14161 +                       .decrypt = aead_decrypt,
14162 +                       .ivsize = AES_BLOCK_SIZE,
14163 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14164 +               },
14165 +               .caam = {
14166 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14167 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14168 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14169 +               }
14170 +       },
14171 +       {
14172 +               .aead = {
14173 +                       .base = {
14174 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14175 +                                           "cbc(aes)))",
14176 +                               .cra_driver_name = "echainiv-authenc-"
14177 +                                                  "hmac-sha256-cbc-aes-"
14178 +                                                  "caam-qi",
14179 +                               .cra_blocksize = AES_BLOCK_SIZE,
14180 +                       },
14181 +                       .setkey = aead_setkey,
14182 +                       .setauthsize = aead_setauthsize,
14183 +                       .encrypt = aead_encrypt,
14184 +                       .decrypt = aead_decrypt,
14185 +                       .ivsize = AES_BLOCK_SIZE,
14186 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14187 +               },
14188 +               .caam = {
14189 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14190 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14191 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14192 +                       .geniv = true,
14193 +               }
14194 +       },
14195 +       {
14196 +               .aead = {
14197 +                       .base = {
14198 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
14199 +                               .cra_driver_name = "authenc-hmac-sha384-"
14200 +                                                  "cbc-aes-caam-qi",
14201 +                               .cra_blocksize = AES_BLOCK_SIZE,
14202 +                       },
14203 +                       .setkey = aead_setkey,
14204 +                       .setauthsize = aead_setauthsize,
14205 +                       .encrypt = aead_encrypt,
14206 +                       .decrypt = aead_decrypt,
14207 +                       .ivsize = AES_BLOCK_SIZE,
14208 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14209 +               },
14210 +               .caam = {
14211 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14212 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14213 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14214 +               }
14215 +       },
14216 +       {
14217 +               .aead = {
14218 +                       .base = {
14219 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14220 +                                           "cbc(aes)))",
14221 +                               .cra_driver_name = "echainiv-authenc-"
14222 +                                                  "hmac-sha384-cbc-aes-"
14223 +                                                  "caam-qi",
14224 +                               .cra_blocksize = AES_BLOCK_SIZE,
14225 +                       },
14226 +                       .setkey = aead_setkey,
14227 +                       .setauthsize = aead_setauthsize,
14228 +                       .encrypt = aead_encrypt,
14229 +                       .decrypt = aead_decrypt,
14230 +                       .ivsize = AES_BLOCK_SIZE,
14231 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14232 +               },
14233 +               .caam = {
14234 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14235 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14236 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14237 +                       .geniv = true,
14238 +               }
14239 +       },
14240 +       {
14241 +               .aead = {
14242 +                       .base = {
14243 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
14244 +                               .cra_driver_name = "authenc-hmac-sha512-"
14245 +                                                  "cbc-aes-caam-qi",
14246 +                               .cra_blocksize = AES_BLOCK_SIZE,
14247 +                       },
14248 +                       .setkey = aead_setkey,
14249 +                       .setauthsize = aead_setauthsize,
14250 +                       .encrypt = aead_encrypt,
14251 +                       .decrypt = aead_decrypt,
14252 +                       .ivsize = AES_BLOCK_SIZE,
14253 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14254 +               },
14255 +               .caam = {
14256 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14257 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14258 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14259 +               }
14260 +       },
14261 +       {
14262 +               .aead = {
14263 +                       .base = {
14264 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14265 +                                           "cbc(aes)))",
14266 +                               .cra_driver_name = "echainiv-authenc-"
14267 +                                                  "hmac-sha512-cbc-aes-"
14268 +                                                  "caam-qi",
14269 +                               .cra_blocksize = AES_BLOCK_SIZE,
14270 +                       },
14271 +                       .setkey = aead_setkey,
14272 +                       .setauthsize = aead_setauthsize,
14273 +                       .encrypt = aead_encrypt,
14274 +                       .decrypt = aead_decrypt,
14275 +                       .ivsize = AES_BLOCK_SIZE,
14276 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14277 +               },
14278 +               .caam = {
14279 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14280 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14281 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14282 +                       .geniv = true,
14283 +               }
14284 +       },
14285 +       {
14286 +               .aead = {
14287 +                       .base = {
14288 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14289 +                               .cra_driver_name = "authenc-hmac-md5-"
14290 +                                                  "cbc-des3_ede-caam-qi",
14291 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14292 +                       },
14293 +                       .setkey = aead_setkey,
14294 +                       .setauthsize = aead_setauthsize,
14295 +                       .encrypt = aead_encrypt,
14296 +                       .decrypt = aead_decrypt,
14297 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14298 +                       .maxauthsize = MD5_DIGEST_SIZE,
14299 +               },
14300 +               .caam = {
14301 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14302 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14303 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14304 +               }
14305 +       },
14306 +       {
14307 +               .aead = {
14308 +                       .base = {
14309 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14310 +                                           "cbc(des3_ede)))",
14311 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14312 +                                                  "cbc-des3_ede-caam-qi",
14313 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14314 +                       },
14315 +                       .setkey = aead_setkey,
14316 +                       .setauthsize = aead_setauthsize,
14317 +                       .encrypt = aead_encrypt,
14318 +                       .decrypt = aead_decrypt,
14319 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14320 +                       .maxauthsize = MD5_DIGEST_SIZE,
14321 +               },
14322 +               .caam = {
14323 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14324 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14325 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14326 +                       .geniv = true,
14327 +               }
14328 +       },
14329 +       {
14330 +               .aead = {
14331 +                       .base = {
14332 +                               .cra_name = "authenc(hmac(sha1),"
14333 +                                           "cbc(des3_ede))",
14334 +                               .cra_driver_name = "authenc-hmac-sha1-"
14335 +                                                  "cbc-des3_ede-caam-qi",
14336 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14337 +                       },
14338 +                       .setkey = aead_setkey,
14339 +                       .setauthsize = aead_setauthsize,
14340 +                       .encrypt = aead_encrypt,
14341 +                       .decrypt = aead_decrypt,
14342 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14343 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14344 +               },
14345 +               .caam = {
14346 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14347 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14348 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14349 +               },
14350 +       },
14351 +       {
14352 +               .aead = {
14353 +                       .base = {
14354 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14355 +                                           "cbc(des3_ede)))",
14356 +                               .cra_driver_name = "echainiv-authenc-"
14357 +                                                  "hmac-sha1-"
14358 +                                                  "cbc-des3_ede-caam-qi",
14359 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14360 +                       },
14361 +                       .setkey = aead_setkey,
14362 +                       .setauthsize = aead_setauthsize,
14363 +                       .encrypt = aead_encrypt,
14364 +                       .decrypt = aead_decrypt,
14365 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14366 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14367 +               },
14368 +               .caam = {
14369 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14370 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14371 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14372 +                       .geniv = true,
14373 +               }
14374 +       },
14375 +       {
14376 +               .aead = {
14377 +                       .base = {
14378 +                               .cra_name = "authenc(hmac(sha224),"
14379 +                                           "cbc(des3_ede))",
14380 +                               .cra_driver_name = "authenc-hmac-sha224-"
14381 +                                                  "cbc-des3_ede-caam-qi",
14382 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14383 +                       },
14384 +                       .setkey = aead_setkey,
14385 +                       .setauthsize = aead_setauthsize,
14386 +                       .encrypt = aead_encrypt,
14387 +                       .decrypt = aead_decrypt,
14388 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14389 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14390 +               },
14391 +               .caam = {
14392 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14393 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14394 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14395 +               },
14396 +       },
14397 +       {
14398 +               .aead = {
14399 +                       .base = {
14400 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14401 +                                           "cbc(des3_ede)))",
14402 +                               .cra_driver_name = "echainiv-authenc-"
14403 +                                                  "hmac-sha224-"
14404 +                                                  "cbc-des3_ede-caam-qi",
14405 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14406 +                       },
14407 +                       .setkey = aead_setkey,
14408 +                       .setauthsize = aead_setauthsize,
14409 +                       .encrypt = aead_encrypt,
14410 +                       .decrypt = aead_decrypt,
14411 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14412 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14413 +               },
14414 +               .caam = {
14415 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14416 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14417 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14418 +                       .geniv = true,
14419 +               }
14420 +       },
14421 +       {
14422 +               .aead = {
14423 +                       .base = {
14424 +                               .cra_name = "authenc(hmac(sha256),"
14425 +                                           "cbc(des3_ede))",
14426 +                               .cra_driver_name = "authenc-hmac-sha256-"
14427 +                                                  "cbc-des3_ede-caam-qi",
14428 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14429 +                       },
14430 +                       .setkey = aead_setkey,
14431 +                       .setauthsize = aead_setauthsize,
14432 +                       .encrypt = aead_encrypt,
14433 +                       .decrypt = aead_decrypt,
14434 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14435 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14436 +               },
14437 +               .caam = {
14438 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14439 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14440 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14441 +               },
14442 +       },
14443 +       {
14444 +               .aead = {
14445 +                       .base = {
14446 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14447 +                                           "cbc(des3_ede)))",
14448 +                               .cra_driver_name = "echainiv-authenc-"
14449 +                                                  "hmac-sha256-"
14450 +                                                  "cbc-des3_ede-caam-qi",
14451 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14452 +                       },
14453 +                       .setkey = aead_setkey,
14454 +                       .setauthsize = aead_setauthsize,
14455 +                       .encrypt = aead_encrypt,
14456 +                       .decrypt = aead_decrypt,
14457 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14458 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14459 +               },
14460 +               .caam = {
14461 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14462 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14463 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14464 +                       .geniv = true,
14465 +               }
14466 +       },
14467 +       {
14468 +               .aead = {
14469 +                       .base = {
14470 +                               .cra_name = "authenc(hmac(sha384),"
14471 +                                           "cbc(des3_ede))",
14472 +                               .cra_driver_name = "authenc-hmac-sha384-"
14473 +                                                  "cbc-des3_ede-caam-qi",
14474 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14475 +                       },
14476 +                       .setkey = aead_setkey,
14477 +                       .setauthsize = aead_setauthsize,
14478 +                       .encrypt = aead_encrypt,
14479 +                       .decrypt = aead_decrypt,
14480 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14481 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14482 +               },
14483 +               .caam = {
14484 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14485 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14486 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14487 +               },
14488 +       },
14489 +       {
14490 +               .aead = {
14491 +                       .base = {
14492 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14493 +                                           "cbc(des3_ede)))",
14494 +                               .cra_driver_name = "echainiv-authenc-"
14495 +                                                  "hmac-sha384-"
14496 +                                                  "cbc-des3_ede-caam-qi",
14497 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14498 +                       },
14499 +                       .setkey = aead_setkey,
14500 +                       .setauthsize = aead_setauthsize,
14501 +                       .encrypt = aead_encrypt,
14502 +                       .decrypt = aead_decrypt,
14503 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14504 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14505 +               },
14506 +               .caam = {
14507 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14508 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14509 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14510 +                       .geniv = true,
14511 +               }
14512 +       },
14513 +       {
14514 +               .aead = {
14515 +                       .base = {
14516 +                               .cra_name = "authenc(hmac(sha512),"
14517 +                                           "cbc(des3_ede))",
14518 +                               .cra_driver_name = "authenc-hmac-sha512-"
14519 +                                                  "cbc-des3_ede-caam-qi",
14520 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14521 +                       },
14522 +                       .setkey = aead_setkey,
14523 +                       .setauthsize = aead_setauthsize,
14524 +                       .encrypt = aead_encrypt,
14525 +                       .decrypt = aead_decrypt,
14526 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14527 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14528 +               },
14529 +               .caam = {
14530 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14531 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14532 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14533 +               },
14534 +       },
14535 +       {
14536 +               .aead = {
14537 +                       .base = {
14538 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14539 +                                           "cbc(des3_ede)))",
14540 +                               .cra_driver_name = "echainiv-authenc-"
14541 +                                                  "hmac-sha512-"
14542 +                                                  "cbc-des3_ede-caam-qi",
14543 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14544 +                       },
14545 +                       .setkey = aead_setkey,
14546 +                       .setauthsize = aead_setauthsize,
14547 +                       .encrypt = aead_encrypt,
14548 +                       .decrypt = aead_decrypt,
14549 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14550 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14551 +               },
14552 +               .caam = {
14553 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14554 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14555 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14556 +                       .geniv = true,
14557 +               }
14558 +       },
14559 +       {
14560 +               .aead = {
14561 +                       .base = {
14562 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
14563 +                               .cra_driver_name = "authenc-hmac-md5-"
14564 +                                                  "cbc-des-caam-qi",
14565 +                               .cra_blocksize = DES_BLOCK_SIZE,
14566 +                       },
14567 +                       .setkey = aead_setkey,
14568 +                       .setauthsize = aead_setauthsize,
14569 +                       .encrypt = aead_encrypt,
14570 +                       .decrypt = aead_decrypt,
14571 +                       .ivsize = DES_BLOCK_SIZE,
14572 +                       .maxauthsize = MD5_DIGEST_SIZE,
14573 +               },
14574 +               .caam = {
14575 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14576 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14577 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14578 +               },
14579 +       },
14580 +       {
14581 +               .aead = {
14582 +                       .base = {
14583 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14584 +                                           "cbc(des)))",
14585 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14586 +                                                  "cbc-des-caam-qi",
14587 +                               .cra_blocksize = DES_BLOCK_SIZE,
14588 +                       },
14589 +                       .setkey = aead_setkey,
14590 +                       .setauthsize = aead_setauthsize,
14591 +                       .encrypt = aead_encrypt,
14592 +                       .decrypt = aead_decrypt,
14593 +                       .ivsize = DES_BLOCK_SIZE,
14594 +                       .maxauthsize = MD5_DIGEST_SIZE,
14595 +               },
14596 +               .caam = {
14597 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14598 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14599 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14600 +                       .geniv = true,
14601 +               }
14602 +       },
14603 +       {
14604 +               .aead = {
14605 +                       .base = {
14606 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
14607 +                               .cra_driver_name = "authenc-hmac-sha1-"
14608 +                                                  "cbc-des-caam-qi",
14609 +                               .cra_blocksize = DES_BLOCK_SIZE,
14610 +                       },
14611 +                       .setkey = aead_setkey,
14612 +                       .setauthsize = aead_setauthsize,
14613 +                       .encrypt = aead_encrypt,
14614 +                       .decrypt = aead_decrypt,
14615 +                       .ivsize = DES_BLOCK_SIZE,
14616 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14617 +               },
14618 +               .caam = {
14619 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14620 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14621 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14622 +               },
14623 +       },
14624 +       {
14625 +               .aead = {
14626 +                       .base = {
14627 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14628 +                                           "cbc(des)))",
14629 +                               .cra_driver_name = "echainiv-authenc-"
14630 +                                                  "hmac-sha1-cbc-des-caam-qi",
14631 +                               .cra_blocksize = DES_BLOCK_SIZE,
14632 +                       },
14633 +                       .setkey = aead_setkey,
14634 +                       .setauthsize = aead_setauthsize,
14635 +                       .encrypt = aead_encrypt,
14636 +                       .decrypt = aead_decrypt,
14637 +                       .ivsize = DES_BLOCK_SIZE,
14638 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14639 +               },
14640 +               .caam = {
14641 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14642 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14643 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14644 +                       .geniv = true,
14645 +               }
14646 +       },
14647 +       {
14648 +               .aead = {
14649 +                       .base = {
14650 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
14651 +                               .cra_driver_name = "authenc-hmac-sha224-"
14652 +                                                  "cbc-des-caam-qi",
14653 +                               .cra_blocksize = DES_BLOCK_SIZE,
14654 +                       },
14655 +                       .setkey = aead_setkey,
14656 +                       .setauthsize = aead_setauthsize,
14657 +                       .encrypt = aead_encrypt,
14658 +                       .decrypt = aead_decrypt,
14659 +                       .ivsize = DES_BLOCK_SIZE,
14660 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14661 +               },
14662 +               .caam = {
14663 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14664 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14665 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14666 +               },
14667 +       },
14668 +       {
14669 +               .aead = {
14670 +                       .base = {
14671 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14672 +                                           "cbc(des)))",
14673 +                               .cra_driver_name = "echainiv-authenc-"
14674 +                                                  "hmac-sha224-cbc-des-"
14675 +                                                  "caam-qi",
14676 +                               .cra_blocksize = DES_BLOCK_SIZE,
14677 +                       },
14678 +                       .setkey = aead_setkey,
14679 +                       .setauthsize = aead_setauthsize,
14680 +                       .encrypt = aead_encrypt,
14681 +                       .decrypt = aead_decrypt,
14682 +                       .ivsize = DES_BLOCK_SIZE,
14683 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14684 +               },
14685 +               .caam = {
14686 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14687 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14688 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14689 +                       .geniv = true,
14690 +               }
14691 +       },
14692 +       {
14693 +               .aead = {
14694 +                       .base = {
14695 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
14696 +                               .cra_driver_name = "authenc-hmac-sha256-"
14697 +                                                  "cbc-des-caam-qi",
14698 +                               .cra_blocksize = DES_BLOCK_SIZE,
14699 +                       },
14700 +                       .setkey = aead_setkey,
14701 +                       .setauthsize = aead_setauthsize,
14702 +                       .encrypt = aead_encrypt,
14703 +                       .decrypt = aead_decrypt,
14704 +                       .ivsize = DES_BLOCK_SIZE,
14705 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14706 +               },
14707 +               .caam = {
14708 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14709 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14710 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14711 +               },
14712 +       },
14713 +       {
14714 +               .aead = {
14715 +                       .base = {
14716 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14717 +                                           "cbc(des)))",
14718 +                               .cra_driver_name = "echainiv-authenc-"
14719 +                                                  "hmac-sha256-cbc-des-"
14720 +                                                  "caam-qi",
14721 +                               .cra_blocksize = DES_BLOCK_SIZE,
14722 +                       },
14723 +                       .setkey = aead_setkey,
14724 +                       .setauthsize = aead_setauthsize,
14725 +                       .encrypt = aead_encrypt,
14726 +                       .decrypt = aead_decrypt,
14727 +                       .ivsize = DES_BLOCK_SIZE,
14728 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14729 +               },
14730 +               .caam = {
14731 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14732 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14733 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14734 +                       .geniv = true,
14735 +               },
14736 +       },
14737 +       {
14738 +               .aead = {
14739 +                       .base = {
14740 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
14741 +                               .cra_driver_name = "authenc-hmac-sha384-"
14742 +                                                  "cbc-des-caam-qi",
14743 +                               .cra_blocksize = DES_BLOCK_SIZE,
14744 +                       },
14745 +                       .setkey = aead_setkey,
14746 +                       .setauthsize = aead_setauthsize,
14747 +                       .encrypt = aead_encrypt,
14748 +                       .decrypt = aead_decrypt,
14749 +                       .ivsize = DES_BLOCK_SIZE,
14750 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14751 +               },
14752 +               .caam = {
14753 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14754 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14755 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14756 +               },
14757 +       },
14758 +       {
14759 +               .aead = {
14760 +                       .base = {
14761 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14762 +                                           "cbc(des)))",
14763 +                               .cra_driver_name = "echainiv-authenc-"
14764 +                                                  "hmac-sha384-cbc-des-"
14765 +                                                  "caam-qi",
14766 +                               .cra_blocksize = DES_BLOCK_SIZE,
14767 +                       },
14768 +                       .setkey = aead_setkey,
14769 +                       .setauthsize = aead_setauthsize,
14770 +                       .encrypt = aead_encrypt,
14771 +                       .decrypt = aead_decrypt,
14772 +                       .ivsize = DES_BLOCK_SIZE,
14773 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14774 +               },
14775 +               .caam = {
14776 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14777 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14778 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14779 +                       .geniv = true,
14780 +               }
14781 +       },
14782 +       {
14783 +               .aead = {
14784 +                       .base = {
14785 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
14786 +                               .cra_driver_name = "authenc-hmac-sha512-"
14787 +                                                  "cbc-des-caam-qi",
14788 +                               .cra_blocksize = DES_BLOCK_SIZE,
14789 +                       },
14790 +                       .setkey = aead_setkey,
14791 +                       .setauthsize = aead_setauthsize,
14792 +                       .encrypt = aead_encrypt,
14793 +                       .decrypt = aead_decrypt,
14794 +                       .ivsize = DES_BLOCK_SIZE,
14795 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14796 +               },
14797 +               .caam = {
14798 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14799 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14800 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14801 +               }
14802 +       },
14803 +       {
14804 +               .aead = {
14805 +                       .base = {
14806 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14807 +                                           "cbc(des)))",
14808 +                               .cra_driver_name = "echainiv-authenc-"
14809 +                                                  "hmac-sha512-cbc-des-"
14810 +                                                  "caam-qi",
14811 +                               .cra_blocksize = DES_BLOCK_SIZE,
14812 +                       },
14813 +                       .setkey = aead_setkey,
14814 +                       .setauthsize = aead_setauthsize,
14815 +                       .encrypt = aead_encrypt,
14816 +                       .decrypt = aead_decrypt,
14817 +                       .ivsize = DES_BLOCK_SIZE,
14818 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14819 +               },
14820 +               .caam = {
14821 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14822 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14823 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14824 +                       .geniv = true,
14825 +               }
14826 +       },
14827 +       {
14828 +               .aead = {
14829 +                       .base = {
14830 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
14831 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14832 +                               .cra_blocksize = AES_BLOCK_SIZE,
14833 +                       },
14834 +                       .setkey = tls_setkey,
14835 +                       .setauthsize = tls_setauthsize,
14836 +                       .encrypt = tls_encrypt,
14837 +                       .decrypt = tls_decrypt,
14838 +                       .ivsize = AES_BLOCK_SIZE,
14839 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14840 +               },
14841 +               .caam = {
14842 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14843 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14844 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14845 +               }
14846 +       }
14847 +};
14848 +
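The table above only declares templates; consumers resolve them by cra_name through the generic crypto API, and the CAAM driver wins on cra_priority. As a usage sketch (not part of this patch), a kernel caller could instantiate the TLS 1.0 record cipher from the last entry like so:

	#include <crypto/aead.h>

	/* Resolves to "tls10-hmac-sha1-cbc-aes-caam-qi" above, provided no
	 * higher-priority implementation of the same cra_name is registered. */
	struct crypto_aead *tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))",
						    0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);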
14849 +struct caam_crypto_alg {
14850 +       struct list_head entry;
14851 +       struct crypto_alg crypto_alg;
14852 +       struct caam_alg_entry caam;
14853 +};
14854 +
14855 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
14856 +{
14857 +       struct caam_drv_private *priv;
14858 +       /* Digest sizes for MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 */
14859 +       static const u8 digest_size[] = {
14860 +               MD5_DIGEST_SIZE,
14861 +               SHA1_DIGEST_SIZE,
14862 +               SHA224_DIGEST_SIZE,
14863 +               SHA256_DIGEST_SIZE,
14864 +               SHA384_DIGEST_SIZE,
14865 +               SHA512_DIGEST_SIZE
14866 +       };
14867 +       u8 op_id;
14868 +
14869 +       /*
14870 +        * distribute tfms across job rings to ensure in-order
14871 +        * crypto request processing per tfm
14872 +        */
14873 +       ctx->jrdev = caam_jr_alloc();
14874 +       if (IS_ERR(ctx->jrdev)) {
14875 +               pr_err("Job Ring Device allocation for transform failed\n");
14876 +               return PTR_ERR(ctx->jrdev);
14877 +       }
14878 +
14879 +       ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
14880 +                                     DMA_TO_DEVICE);
14881 +       if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
14882 +               dev_err(ctx->jrdev, "unable to map key\n");
14883 +               caam_jr_free(ctx->jrdev);
14884 +               return -ENOMEM;
14885 +       }
14886 +
14887 +       /* copy descriptor header template value */
14888 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
14889 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
14890 +
14891 +       if (ctx->adata.algtype) {
14892 +               op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
14893 +                               >> OP_ALG_ALGSEL_SHIFT;
14894 +               if (op_id < ARRAY_SIZE(digest_size)) {
14895 +                       ctx->authsize = digest_size[op_id];
14896 +               } else {
14897 +                       dev_err(ctx->jrdev,
14898 +                               "incorrect op_id %d; must be less than %zu\n",
14899 +                               op_id, ARRAY_SIZE(digest_size));
14900 +                       caam_jr_free(ctx->jrdev);
14901 +                       return -EINVAL;
14902 +               }
14903 +       } else {
14904 +               ctx->authsize = 0;
14905 +       }
14906 +
14907 +       priv = dev_get_drvdata(ctx->jrdev->parent);
14908 +       ctx->qidev = priv->qidev;
14909 +
14910 +       spin_lock_init(&ctx->lock);
14911 +       ctx->drv_ctx[ENCRYPT] = NULL;
14912 +       ctx->drv_ctx[DECRYPT] = NULL;
14913 +       ctx->drv_ctx[GIVENCRYPT] = NULL;
14914 +
14915 +       return 0;
14916 +}
14917 +
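A worked example of the digest-size lookup in caam_init_common() above, assuming the MDHA ALGSEL encodings from desc.h, where MD5 through SHA-512 occupy the consecutive codes 0x40-0x45 so the low nibble selected by OP_ALG_ALGSEL_SUBMASK doubles as an index into digest_size[]:

	u32 adata_algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256 |
			    OP_ALG_AAI_HMAC_PRECOMP;
	u8 op_id = (adata_algtype & OP_ALG_ALGSEL_SUBMASK)
			>> OP_ALG_ALGSEL_SHIFT;

	/* SHA-256 is ALGSEL code 0x43, so op_id == 3 and
	 * digest_size[3] == SHA256_DIGEST_SIZE == 32 bytes. */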
14918 +static int caam_cra_init(struct crypto_tfm *tfm)
14919 +{
14920 +       struct crypto_alg *alg = tfm->__crt_alg;
14921 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14922 +                                                       crypto_alg);
14923 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
14924 +
14925 +       return caam_init_common(ctx, &caam_alg->caam);
14926 +}
14927 +
14928 +static int caam_aead_init(struct crypto_aead *tfm)
14929 +{
14930 +       struct aead_alg *alg = crypto_aead_alg(tfm);
14931 +       struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14932 +                                                     aead);
14933 +       struct caam_ctx *ctx = crypto_aead_ctx(tfm);
14934 +
14935 +       return caam_init_common(ctx, &caam_alg->caam);
14936 +}
14937 +
14938 +static void caam_exit_common(struct caam_ctx *ctx)
14939 +{
14940 +       caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
14941 +       caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
14942 +       caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
14943 +
14944 +       dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
14945 +                        DMA_TO_DEVICE);
14946 +
14947 +       caam_jr_free(ctx->jrdev);
14948 +}
14949 +
14950 +static void caam_cra_exit(struct crypto_tfm *tfm)
14951 +{
14952 +       caam_exit_common(crypto_tfm_ctx(tfm));
14953 +}
14954 +
14955 +static void caam_aead_exit(struct crypto_aead *tfm)
14956 +{
14957 +       caam_exit_common(crypto_aead_ctx(tfm));
14958 +}
14959 +
14960 +static struct list_head alg_list;
14961 +static void __exit caam_qi_algapi_exit(void)
14962 +{
14963 +       struct caam_crypto_alg *t_alg, *n;
14964 +       int i;
14965 +
14966 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
14967 +               struct caam_aead_alg *t_alg = driver_aeads + i;
14968 +
14969 +               if (t_alg->registered)
14970 +                       crypto_unregister_aead(&t_alg->aead);
14971 +       }
14972 +
14973 +       if (!alg_list.next)
14974 +               return;
14975 +
14976 +       list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
14977 +               crypto_unregister_alg(&t_alg->crypto_alg);
14978 +               list_del(&t_alg->entry);
14979 +               kfree(t_alg);
14980 +       }
14981 +}
14982 +
14983 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
14984 +                                             *template)
14985 +{
14986 +       struct caam_crypto_alg *t_alg;
14987 +       struct crypto_alg *alg;
14988 +
14989 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
14990 +       if (!t_alg)
14991 +               return ERR_PTR(-ENOMEM);
14992 +
14993 +       alg = &t_alg->crypto_alg;
14994 +
14995 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
14996 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
14997 +                template->driver_name);
14998 +       alg->cra_module = THIS_MODULE;
14999 +       alg->cra_init = caam_cra_init;
15000 +       alg->cra_exit = caam_cra_exit;
15001 +       alg->cra_priority = CAAM_CRA_PRIORITY;
15002 +       alg->cra_blocksize = template->blocksize;
15003 +       alg->cra_alignmask = 0;
15004 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
15005 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15006 +                        template->type;
15007 +       switch (template->type) {
15008 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
15009 +               alg->cra_type = &crypto_givcipher_type;
15010 +               alg->cra_ablkcipher = template->template_ablkcipher;
15011 +               break;
15012 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
15013 +               alg->cra_type = &crypto_ablkcipher_type;
15014 +               alg->cra_ablkcipher = template->template_ablkcipher;
15015 +               break;
15016 +       }
15017 +
15018 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
15019 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
15020 +
15021 +       return t_alg;
15022 +}
15023 +
15024 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15025 +{
15026 +       struct aead_alg *alg = &t_alg->aead;
15027 +
15028 +       alg->base.cra_module = THIS_MODULE;
15029 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
15030 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15031 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15032 +
15033 +       alg->init = caam_aead_init;
15034 +       alg->exit = caam_aead_exit;
15035 +}
15036 +
15037 +static int __init caam_qi_algapi_init(void)
15038 +{
15039 +       struct device_node *dev_node;
15040 +       struct platform_device *pdev;
15041 +       struct device *ctrldev;
15042 +       struct caam_drv_private *priv;
15043 +       int i = 0, err = 0;
15044 +       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15045 +       unsigned int md_limit = SHA512_DIGEST_SIZE;
15046 +       bool registered = false;
15047 +
15048 +       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15049 +       if (!dev_node) {
15050 +               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15051 +               if (!dev_node)
15052 +                       return -ENODEV;
15053 +       }
15054 +
15055 +       pdev = of_find_device_by_node(dev_node);
15056 +       of_node_put(dev_node);
15057 +       if (!pdev)
15058 +               return -ENODEV;
15059 +
15060 +       ctrldev = &pdev->dev;
15061 +       priv = dev_get_drvdata(ctrldev);
15062 +
15063 +       /*
15064 +        * If priv is NULL, it's probably because the caam driver wasn't
15065 +        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15066 +        */
15067 +       if (!priv || !priv->qi_present)
15068 +               return -ENODEV;
15069 +
15070 +       if (caam_dpaa2) {
15071 +               dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15072 +               return -ENODEV;
15073 +       }
15074 +
15075 +       INIT_LIST_HEAD(&alg_list);
15076 +
15077 +       /*
15078 +        * Register crypto algorithms the device supports.
15079 +        * First, detect presence and attributes of DES, AES, and MD blocks.
15080 +        */
15081 +       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15082 +       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15083 +       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15084 +       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15085 +       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15086 +
15087 +       /* If MD is present, limit digest size based on LP256 */
15088 +       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15089 +               md_limit = SHA256_DIGEST_SIZE;
15090 +
15091 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15092 +               struct caam_crypto_alg *t_alg;
15093 +               struct caam_alg_template *alg = driver_algs + i;
15094 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15095 +
15096 +               /* Skip DES algorithms if not supported by device */
15097 +               if (!des_inst &&
15098 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15099 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
15100 +                       continue;
15101 +
15102 +               /* Skip AES algorithms if not supported by device */
15103 +               if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15104 +                       continue;
15105 +
15106 +               t_alg = caam_alg_alloc(alg);
15107 +               if (IS_ERR(t_alg)) {
15108 +                       err = PTR_ERR(t_alg);
15109 +                       dev_warn(priv->qidev, "%s alg allocation failed\n",
15110 +                                alg->driver_name);
15111 +                       continue;
15112 +               }
15113 +
15114 +               err = crypto_register_alg(&t_alg->crypto_alg);
15115 +               if (err) {
15116 +                       dev_warn(priv->qidev, "%s alg registration failed\n",
15117 +                                t_alg->crypto_alg.cra_driver_name);
15118 +                       kfree(t_alg);
15119 +                       continue;
15120 +               }
15121 +
15122 +               list_add_tail(&t_alg->entry, &alg_list);
15123 +               registered = true;
15124 +       }
15125 +
15126 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15127 +               struct caam_aead_alg *t_alg = driver_aeads + i;
15128 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15129 +                                OP_ALG_ALGSEL_MASK;
15130 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15131 +                                OP_ALG_ALGSEL_MASK;
15132 +               u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15133 +
15134 +               /* Skip DES algorithms if not supported by device */
15135 +               if (!des_inst &&
15136 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15137 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15138 +                       continue;
15139 +
15140 +               /* Skip AES algorithms if not supported by device */
15141 +               if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15142 +                       continue;
15143 +
15144 +               /*
15145 +                * Check support for AES algorithms not available
15146 +                * on LP devices.
15147 +                */
15148 +               if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15149 +                   (alg_aai == OP_ALG_AAI_GCM))
15150 +                       continue;
15151 +
15152 +               /*
15153 +                * Skip algorithms requiring message digests
15154 +                * if MD or MD size is not supported by device.
15155 +                */
15156 +               if (c2_alg_sel &&
15157 +                   (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15158 +                       continue;
15159 +
15160 +               caam_aead_alg_init(t_alg);
15161 +
15162 +               err = crypto_register_aead(&t_alg->aead);
15163 +               if (err) {
15164 +                       pr_warn("%s alg registration failed\n",
15165 +                               t_alg->aead.base.cra_driver_name);
15166 +                       continue;
15167 +               }
15168 +
15169 +               t_alg->registered = true;
15170 +               registered = true;
15171 +       }
15172 +
15173 +       if (registered)
15174 +               dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15175 +
15176 +       return err;
15177 +}
15178 +
15179 +module_init(caam_qi_algapi_init);
15180 +module_exit(caam_qi_algapi_exit);
15181 +
15182 +MODULE_LICENSE("GPL");
15183 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15184 +MODULE_AUTHOR("Freescale Semiconductor");
15185 diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
15186 new file mode 100644
15187 index 00000000..102b0841
15188 --- /dev/null
15189 +++ b/drivers/crypto/caam/caamalg_qi2.c
15190 @@ -0,0 +1,4428 @@
15191 +/*
15192 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15193 + * Copyright 2017 NXP
15194 + *
15195 + * Redistribution and use in source and binary forms, with or without
15196 + * modification, are permitted provided that the following conditions are met:
15197 + *     * Redistributions of source code must retain the above copyright
15198 + *      notice, this list of conditions and the following disclaimer.
15199 + *     * Redistributions in binary form must reproduce the above copyright
15200 + *      notice, this list of conditions and the following disclaimer in the
15201 + *      documentation and/or other materials provided with the distribution.
15202 + *     * Neither the names of the above-listed copyright holders nor the
15203 + *      names of any contributors may be used to endorse or promote products
15204 + *      derived from this software without specific prior written permission.
15205 + *
15206 + *
15207 + * ALTERNATIVELY, this software may be distributed under the terms of the
15208 + * GNU General Public License ("GPL") as published by the Free Software
15209 + * Foundation, either version 2 of that License or (at your option) any
15210 + * later version.
15211 + *
15212 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15213 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15214 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15215 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15216 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15217 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15218 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15219 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15220 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15221 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15222 + * POSSIBILITY OF SUCH DAMAGE.
15223 + */
15224 +
15225 +#include "compat.h"
15226 +#include "regs.h"
15227 +#include "caamalg_qi2.h"
15228 +#include "dpseci_cmd.h"
15229 +#include "desc_constr.h"
15230 +#include "error.h"
15231 +#include "sg_sw_sec4.h"
15232 +#include "sg_sw_qm2.h"
15233 +#include "key_gen.h"
15234 +#include "caamalg_desc.h"
15235 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15236 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15237 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15238 +
15239 +#define CAAM_CRA_PRIORITY      2000
15240 +
15241 +/* max key is sum of AES_MAX_KEY_SIZE, CTR_RFC3686_NONCE_SIZE and max split key size */
15242 +#define CAAM_MAX_KEY_SIZE      (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15243 +                                SHA512_DIGEST_SIZE * 2)
15244 +
15245 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15246 +bool caam_little_end;
15247 +EXPORT_SYMBOL(caam_little_end);
15248 +bool caam_imx;
15249 +EXPORT_SYMBOL(caam_imx);
15250 +#endif
15251 +
15252 +/*
15253 + * This is a cache of buffers, from which users of the CAAM QI driver
15254 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15255 + * NOTE: A more elegant solution would be to have some headroom in the frames
15256 + *       being processed. This can be added by the dpaa2-eth driver. This would
15257 + *       pose a problem for userspace application processing, which cannot
15258 + *       know of this limitation. So for now, this will work.
15259 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
15260 + */
15261 +static struct kmem_cache *qi_cache;
15262 +
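A minimal sketch of how this cache is expected to be created at probe time (the actual call lives elsewhere in this patch; the cache name here is illustrative):

	/* Objects are CAAM_QI_MEMCACHE_SIZE bytes, allocated from DMA-able
	 * memory so the SEC block can address them. */
	qi_cache = kmem_cache_create("dpaa2_caamqicache",
				     CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache)
		return -ENOMEM;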
15263 +struct caam_alg_entry {
15264 +       struct device *dev;
15265 +       int class1_alg_type;
15266 +       int class2_alg_type;
15267 +       bool rfc3686;
15268 +       bool geniv;
15269 +};
15270 +
15271 +struct caam_aead_alg {
15272 +       struct aead_alg aead;
15273 +       struct caam_alg_entry caam;
15274 +       bool registered;
15275 +};
15276 +
15277 +/**
15278 + * caam_ctx - per-session context
15279 + * @flc: Flow Contexts array
15280 + * @key:  virtual address of the key(s): [authentication key], encryption key
15281 + * @key_dma: I/O virtual address of the key
15282 + * @dev: dpseci device
15283 + * @adata: authentication algorithm details
15284 + * @cdata: encryption algorithm details
15285 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15286 + */
15287 +struct caam_ctx {
15288 +       struct caam_flc flc[NUM_OP];
15289 +       u8 key[CAAM_MAX_KEY_SIZE];
15290 +       dma_addr_t key_dma;
15291 +       struct device *dev;
15292 +       struct alginfo adata;
15293 +       struct alginfo cdata;
15294 +       unsigned int authsize;
15295 +};
15296 +
15297 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15298 +                             dma_addr_t iova_addr)
15299 +{
15300 +       phys_addr_t phys_addr;
15301 +
15302 +       phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15303 +                                  iova_addr;
15304 +
15305 +       return phys_to_virt(phys_addr);
15306 +}
15307 +
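Typical usage, sketched under the assumption of a frame descriptor fd returned on a response queue: addresses the hardware hands back are IOVAs when an IOMMU domain is attached, so they must be translated before the CPU dereferences them (dpaa2_fd_get_addr() is the accessor from the dpaa2-fd header included above):

	/* Hypothetical response-path snippet */
	void *vaddr = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));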
15308 +/*
15309 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15310 + *
15311 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15312 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15313 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15314 + * hosting 16 SG entries.
15315 + *
15316 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15317 + *
15318 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15319 + */
15320 +static inline void *qi_cache_zalloc(gfp_t flags)
15321 +{
15322 +       return kmem_cache_zalloc(qi_cache, flags);
15323 +}
15324 +
15325 +/*
15326 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15327 + *
15328 + * @obj - buffer previously allocated by qi_cache_zalloc
15329 + *
15330 + * No checking is done; the call is a straight passthrough to
15331 + * kmem_cache_free(...).
15332 + */
15333 +static inline void qi_cache_free(void *obj)
15334 +{
15335 +       kmem_cache_free(qi_cache, obj);
15336 +}
15337 +
15338 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15339 +{
15340 +       switch (crypto_tfm_alg_type(areq->tfm)) {
15341 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
15342 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
15343 +               return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15344 +       case CRYPTO_ALG_TYPE_AEAD:
15345 +               return aead_request_ctx(container_of(areq, struct aead_request,
15346 +                                                    base));
15347 +       default:
15348 +               return ERR_PTR(-EINVAL);
15349 +       }
15350 +}
15351 +
15352 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15353 +                      struct scatterlist *dst, int src_nents,
15354 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
15355 +                      enum optype op_type, dma_addr_t qm_sg_dma,
15356 +                      int qm_sg_bytes)
15357 +{
15358 +       if (dst != src) {
15359 +               if (src_nents)
15360 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15361 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15362 +       } else {
15363 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15364 +       }
15365 +
15366 +       if (iv_dma)
15367 +               dma_unmap_single(dev, iv_dma, ivsize,
15368 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15369 +                                                        DMA_TO_DEVICE);
15370 +
15371 +       if (qm_sg_bytes)
15372 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15373 +}
15374 +
15375 +static int aead_set_sh_desc(struct crypto_aead *aead)
15376 +{
15377 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15378 +                                                typeof(*alg), aead);
15379 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15380 +       unsigned int ivsize = crypto_aead_ivsize(aead);
15381 +       struct device *dev = ctx->dev;
15382 +       struct caam_flc *flc;
15383 +       u32 *desc;
15384 +       u32 ctx1_iv_off = 0;
15385 +       u32 *nonce = NULL;
15386 +       unsigned int data_len[2];
15387 +       u32 inl_mask;
15388 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15389 +                              OP_ALG_AAI_CTR_MOD128);
15390 +       const bool is_rfc3686 = alg->caam.rfc3686;
15391 +
15392 +       if (!ctx->cdata.keylen || !ctx->authsize)
15393 +               return 0;
15394 +
15395 +       /*
15396 +        * AES-CTR needs to load IV in CONTEXT1 reg
15397 +        * at an offset of 128bits (16bytes)
15398 +        * CONTEXT1[255:128] = IV
15399 +        */
15400 +       if (ctr_mode)
15401 +               ctx1_iv_off = 16;
15402 +
15403 +       /*
15404 +        * RFC3686 specific:
15405 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15406 +        */
15407 +       if (is_rfc3686) {
15408 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15409 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15410 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15411 +       }
15412 +
15413 +       data_len[0] = ctx->adata.keylen_pad;
15414 +       data_len[1] = ctx->cdata.keylen;
15415 +
15416 +       /* aead_encrypt shared descriptor */
15417 +       if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15418 +                                                DESC_QI_AEAD_ENC_LEN) +
15419 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15420 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15421 +                             ARRAY_SIZE(data_len)) < 0)
15422 +               return -EINVAL;
15423 +
15424 +       if (inl_mask & 1)
15425 +               ctx->adata.key_virt = ctx->key;
15426 +       else
15427 +               ctx->adata.key_dma = ctx->key_dma;
15428 +
15429 +       if (inl_mask & 2)
15430 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15431 +       else
15432 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15433 +
15434 +       ctx->adata.key_inline = !!(inl_mask & 1);
15435 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15436 +
15437 +       flc = &ctx->flc[ENCRYPT];
15438 +       desc = flc->sh_desc;
15439 +
15440 +       if (alg->caam.geniv)
15441 +               cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15442 +                                         ivsize, ctx->authsize, is_rfc3686,
15443 +                                         nonce, ctx1_iv_off, true);
15444 +       else
15445 +               cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15446 +                                      ivsize, ctx->authsize, is_rfc3686, nonce,
15447 +                                      ctx1_iv_off, true);
15448 +
15449 +       flc->flc[1] = desc_len(desc); /* SDL */
15450 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15451 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15452 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15453 +               dev_err(dev, "unable to map shared descriptor\n");
15454 +               return -ENOMEM;
15455 +       }
15456 +
15457 +       /* aead_decrypt shared descriptor */
15458 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15459 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15460 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15461 +                             ARRAY_SIZE(data_len)) < 0)
15462 +               return -EINVAL;
15463 +
15464 +       if (inl_mask & 1)
15465 +               ctx->adata.key_virt = ctx->key;
15466 +       else
15467 +               ctx->adata.key_dma = ctx->key_dma;
15468 +
15469 +       if (inl_mask & 2)
15470 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15471 +       else
15472 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15473 +
15474 +       ctx->adata.key_inline = !!(inl_mask & 1);
15475 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15476 +
15477 +       flc = &ctx->flc[DECRYPT];
15478 +       desc = flc->sh_desc;
15479 +
15480 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15481 +                              ivsize, ctx->authsize, alg->caam.geniv,
15482 +                              is_rfc3686, nonce, ctx1_iv_off, true);
15483 +
15484 +       flc->flc[1] = desc_len(desc); /* SDL */
15485 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15486 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15487 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15488 +               dev_err(dev, "unable to map shared descriptor\n");
15489 +               return -ENOMEM;
15490 +       }
15491 +
15492 +       return 0;
15493 +}
15494 +
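The inl_mask convention used twice in aead_set_sh_desc() deserves a note: desc_inline_query() sets bit i when data_len[i] still fits inside the shared descriptor, and the driver then feeds each key either by value or by reference. A hypothetical helper equivalent to the repeated pattern above:

	static void set_key_source(struct alginfo *ai, bool inline_ok,
				   const u8 *virt, dma_addr_t dma)
	{
		ai->key_inline = inline_ok;
		if (inline_ok)
			ai->key_virt = virt;	/* key bytes copied into the
						 * shared descriptor */
		else
			ai->key_dma = dma;	/* descriptor fetches the key
						 * by bus address */
	}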
15495 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15496 +{
15497 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15498 +
15499 +       ctx->authsize = authsize;
15500 +       aead_set_sh_desc(authenc);
15501 +
15502 +       return 0;
15503 +}
15504 +
15505 +struct split_key_sh_result {
15506 +       struct completion completion;
15507 +       int err;
15508 +       struct device *dev;
15509 +};
15510 +
15511 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15512 +{
15513 +       struct split_key_sh_result *res = cbk_ctx;
15514 +
15515 +#ifdef DEBUG
15516 +       dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15517 +#endif
15518 +
15519 +       if (err)
15520 +               caam_qi2_strstatus(res->dev, err);
15521 +
15522 +       res->err = err;
15523 +       complete(&res->completion);
15524 +}
15525 +
15526 +static int gen_split_key_sh(struct device *dev, u8 *key_out,
15527 +                           struct alginfo * const adata, const u8 *key_in,
15528 +                           u32 keylen)
15529 +{
15530 +       struct caam_request *req_ctx;
15531 +       u32 *desc;
15532 +       struct split_key_sh_result result;
15533 +       dma_addr_t dma_addr_in, dma_addr_out;
15534 +       struct caam_flc *flc;
15535 +       struct dpaa2_fl_entry *in_fle, *out_fle;
15536 +       int ret = -ENOMEM;
15537 +
15538 +       req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
15539 +       if (!req_ctx)
15540 +               return -ENOMEM;
15541 +
15542 +       in_fle = &req_ctx->fd_flt[1];
15543 +       out_fle = &req_ctx->fd_flt[0];
15544 +
15545 +       flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
15546 +       if (!flc)
15547 +               goto err_flc;
15548 +
15549 +       dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
15550 +                                    DMA_TO_DEVICE);
15551 +       if (dma_mapping_error(dev, dma_addr_in)) {
15552 +               dev_err(dev, "unable to map key input memory\n");
15553 +               goto err_dma_addr_in;
15554 +       }
15555 +
15556 +       dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
15557 +                                     DMA_FROM_DEVICE);
15558 +       if (dma_mapping_error(dev, dma_addr_out)) {
15559 +               dev_err(dev, "unable to map key output memory\n");
15560 +               goto err_dma_addr_out;
15561 +       }
15562 +
15563 +       desc = flc->sh_desc;
15564 +
15565 +       init_sh_desc(desc, 0);
15566 +       append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
15567 +
15568 +       /* Set MDHA up for an HMAC-INIT operation */
15569 +       append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
15570 +                        OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
15571 +                        OP_ALG_AS_INIT);
15572 +
15573 +       /*
15574 +        * do a FIFO_LOAD of zero, this will trigger the internal key expansion
15575 +        * into both pads inside MDHA
15576 +        */
15577 +       append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
15578 +                               FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
15579 +
15580 +       /*
15581 +        * FIFO_STORE with the explicit split-key content store
15582 +        * (0x26 output type)
15583 +        */
15584 +       append_fifo_store(desc, dma_addr_out, adata->keylen,
15585 +                         LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
15586 +
15587 +       flc->flc[1] = desc_len(desc); /* SDL */
15588 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15589 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15590 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15591 +               dev_err(dev, "unable to map shared descriptor\n");
15592 +               goto err_flc_dma;
15593 +       }
15594 +
15595 +       dpaa2_fl_set_final(in_fle, true);
15596 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
15597 +       dpaa2_fl_set_addr(in_fle, dma_addr_in);
15598 +       dpaa2_fl_set_len(in_fle, keylen);
15599 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15600 +       dpaa2_fl_set_addr(out_fle, dma_addr_out);
15601 +       dpaa2_fl_set_len(out_fle, adata->keylen_pad);
15602 +
15603 +#ifdef DEBUG
15604 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15605 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
15606 +       print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
15607 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
15608 +#endif
15609 +
15610 +       result.err = 0;
15611 +       init_completion(&result.completion);
15612 +       result.dev = dev;
15613 +
15614 +       req_ctx->flc = flc;
15615 +       req_ctx->cbk = split_key_sh_done;
15616 +       req_ctx->ctx = &result;
15617 +
15618 +       ret = dpaa2_caam_enqueue(dev, req_ctx);
15619 +       if (ret == -EINPROGRESS) {
15620 +               /* in progress */
15621 +               wait_for_completion(&result.completion);
15622 +               ret = result.err;
15623 +#ifdef DEBUG
15624 +               print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15625 +                              DUMP_PREFIX_ADDRESS, 16, 4, key_out,
15626 +                              adata->keylen_pad, 1);
15627 +#endif
15628 +       }
15629 +
15630 +       dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
15631 +                        DMA_TO_DEVICE);
15632 +err_flc_dma:
15633 +       dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
15634 +err_dma_addr_out:
15635 +       dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
15636 +err_dma_addr_in:
15637 +       kfree(flc);
15638 +err_flc:
15639 +       kfree(req_ctx);
15640 +       return ret;
15641 +}
15642 +
15643 +static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
15644 +                             u32 authkeylen)
15645 +{
15646 +       return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
15647 +                               authkeylen);
15648 +}
15649 +
15650 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15651 +                      unsigned int keylen)
15652 +{
15653 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15654 +       struct device *dev = ctx->dev;
15655 +       struct crypto_authenc_keys keys;
15656 +       int ret;
15657 +
15658 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15659 +               goto badkey;
15660 +
15661 +#ifdef DEBUG
15662 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15663 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
15664 +               keys.authkeylen);
15665 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15666 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15667 +#endif
15668 +
15669 +       ctx->adata.keylen = split_key_len(ctx->adata.algtype &
15670 +                                         OP_ALG_ALGSEL_MASK);
15671 +       ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
15672 +                                                 OP_ALG_ALGSEL_MASK);
15673 +
15674 +#ifdef DEBUG
15675 +       dev_err(dev, "split keylen %d split keylen padded %d\n",
15676 +               ctx->adata.keylen, ctx->adata.keylen_pad);
15677 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15678 +                      DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
15679 +#endif
15680 +
15681 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15682 +               goto badkey;
15683 +
15684 +       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
15685 +       if (ret)
15686 +               goto badkey;
15687 +
15688 +       /* append the encryption key after the auth split key */
15689 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15690 +
15691 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
15692 +                                     keys.enckeylen, DMA_TO_DEVICE);
15693 +       if (dma_mapping_error(dev, ctx->key_dma)) {
15694 +               dev_err(dev, "unable to map key i/o memory\n");
15695 +               return -ENOMEM;
15696 +       }
15697 +#ifdef DEBUG
15698 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15699 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15700 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
15701 +#endif
15702 +
15703 +       ctx->cdata.keylen = keys.enckeylen;
15704 +
15705 +       ret = aead_set_sh_desc(aead);
15706 +       if (ret)
15707 +               dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
15708 +                                keys.enckeylen, DMA_TO_DEVICE);
15709 +
15710 +       return ret;
15711 +badkey:
15712 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15713 +       return -EINVAL;
15714 +}
15715 +
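For reference, the layout that aead_setkey() above leaves in ctx->key (the split key being the MDHA ipad/opad precompute produced by gen_split_aead_key(), padded out to keylen_pad):

	/*
	 * |<------ adata.keylen_pad ------>|<-- cdata.keylen -->|
	 * +----------------------------+---+--------------------+
	 * | HMAC split key (ipad,opad) |pad| encryption key     |
	 * +----------------------------+---+--------------------+
	 * ctx->key_dma maps this whole region DMA_TO_DEVICE.
	 */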
15716 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15717 +                                          bool encrypt)
15718 +{
15719 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
15720 +       struct caam_request *req_ctx = aead_request_ctx(req);
15721 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15722 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15723 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15724 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15725 +                                                typeof(*alg), aead);
15726 +       struct device *dev = ctx->dev;
15727 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15728 +                     GFP_KERNEL : GFP_ATOMIC;
15729 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15730 +       struct aead_edesc *edesc;
15731 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15732 +       int ivsize = 0;
15733 +       unsigned int authsize = ctx->authsize;
15734 +       int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15735 +       int in_len, out_len;
15736 +       struct dpaa2_sg_entry *sg_table;
15737 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15738 +
15739 +       /* allocate space for base edesc and link tables */
15740 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15741 +       if (unlikely(!edesc)) {
15742 +               dev_err(dev, "could not allocate extended descriptor\n");
15743 +               return ERR_PTR(-ENOMEM);
15744 +       }
15745 +
15746 +       if (unlikely(req->dst != req->src)) {
15747 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15748 +                                            req->cryptlen);
15749 +               if (unlikely(src_nents < 0)) {
15750 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15751 +                               req->assoclen + req->cryptlen);
15752 +                       qi_cache_free(edesc);
15753 +                       return ERR_PTR(src_nents);
15754 +               }
15755 +
15756 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15757 +                                            req->cryptlen +
15758 +                                            (encrypt ? authsize :
15759 +                                                       (-authsize)));
15760 +               if (unlikely(dst_nents < 0)) {
15761 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15762 +                               req->assoclen + req->cryptlen +
15763 +                               (encrypt ? authsize : (-authsize)));
15764 +                       qi_cache_free(edesc);
15765 +                       return ERR_PTR(dst_nents);
15766 +               }
15767 +
15768 +               if (src_nents) {
15769 +                       mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15770 +                                                     DMA_TO_DEVICE);
15771 +                       if (unlikely(!mapped_src_nents)) {
15772 +                               dev_err(dev, "unable to map source\n");
15773 +                               qi_cache_free(edesc);
15774 +                               return ERR_PTR(-ENOMEM);
15775 +                       }
15776 +               } else {
15777 +                       mapped_src_nents = 0;
15778 +               }
15779 +
15780 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15781 +                                             DMA_FROM_DEVICE);
15782 +               if (unlikely(!mapped_dst_nents)) {
15783 +                       dev_err(dev, "unable to map destination\n");
15784 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15785 +                       qi_cache_free(edesc);
15786 +                       return ERR_PTR(-ENOMEM);
15787 +               }
15788 +       } else {
15789 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15790 +                                            req->cryptlen +
15791 +                                               (encrypt ? authsize : 0));
15792 +               if (unlikely(src_nents < 0)) {
15793 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15794 +                               req->assoclen + req->cryptlen +
15795 +                               (encrypt ? authsize : 0));
15796 +                       qi_cache_free(edesc);
15797 +                       return ERR_PTR(src_nents);
15798 +               }
15799 +
15800 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15801 +                                             DMA_BIDIRECTIONAL);
15802 +               if (unlikely(!mapped_src_nents)) {
15803 +                       dev_err(dev, "unable to map source\n");
15804 +                       qi_cache_free(edesc);
15805 +                       return ERR_PTR(-ENOMEM);
15806 +               }
15807 +       }
15808 +
15809 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15810 +               ivsize = crypto_aead_ivsize(aead);
15811 +               iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15812 +               if (dma_mapping_error(dev, iv_dma)) {
15813 +                       dev_err(dev, "unable to map IV\n");
15814 +                       caam_unmap(dev, req->src, req->dst, src_nents,
15815 +                                  dst_nents, 0, 0, op_type, 0, 0);
15816 +                       qi_cache_free(edesc);
15817 +                       return ERR_PTR(-ENOMEM);
15818 +               }
15819 +       }
15820 +
15821 +       /*
15822 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15823 +        * Input is not contiguous.
15824 +        */
15825 +       qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15826 +                     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15827 +       if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15828 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15829 +                       qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15830 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15831 +                          iv_dma, ivsize, op_type, 0, 0);
15832 +               qi_cache_free(edesc);
15833 +               return ERR_PTR(-ENOMEM);
15834 +       }
15835 +       sg_table = &edesc->sgt[0];
15836 +       qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15837 +
15838 +       edesc->src_nents = src_nents;
15839 +       edesc->dst_nents = dst_nents;
15840 +       edesc->iv_dma = iv_dma;
15841 +
15842 +       edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15843 +                                            DMA_TO_DEVICE);
15844 +       if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15845 +               dev_err(dev, "unable to map assoclen\n");
15846 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15847 +                          iv_dma, ivsize, op_type, 0, 0);
15848 +               qi_cache_free(edesc);
15849 +               return ERR_PTR(-ENOMEM);
15850 +       }
15851 +
15852 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15853 +       qm_sg_index++;
15854 +       if (ivsize) {
15855 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15856 +               qm_sg_index++;
15857 +       }
15858 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15859 +       qm_sg_index += mapped_src_nents;
15860 +
15861 +       if (mapped_dst_nents > 1)
15862 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15863 +                                qm_sg_index, 0);
15864 +
15865 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15866 +       if (dma_mapping_error(dev, qm_sg_dma)) {
15867 +               dev_err(dev, "unable to map S/G table\n");
15868 +               dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15869 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15870 +                          iv_dma, ivsize, op_type, 0, 0);
15871 +               qi_cache_free(edesc);
15872 +               return ERR_PTR(-ENOMEM);
15873 +       }
15874 +
15875 +       edesc->qm_sg_dma = qm_sg_dma;
15876 +       edesc->qm_sg_bytes = qm_sg_bytes;
15877 +
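+       /*
+        * Total output is assoclen + cryptlen, grown by the ICV on encrypt
+        * and shrunk by it on decrypt. Total input additionally covers the
+        * 4-byte assoclen entry and the IV (when present) that were
+        * prepended to the input S/G table above.
+        */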
15878 +       out_len = req->assoclen + req->cryptlen +
15879 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
15880 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15881 +
15882 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15883 +       dpaa2_fl_set_final(in_fle, true);
15884 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15885 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15886 +       dpaa2_fl_set_len(in_fle, in_len);
15887 +
15888 +       if (req->dst == req->src) {
15889 +               if (mapped_src_nents == 1) {
15890 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15891 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15892 +               } else {
15893 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15894 +                       dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15895 +                                         (1 + !!ivsize) * sizeof(*sg_table));
15896 +               }
15897 +       } else if (mapped_dst_nents == 1) {
15898 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15899 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15900 +       } else {
15901 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15902 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15903 +                                 sizeof(*sg_table));
15904 +       }
15905 +
15906 +       dpaa2_fl_set_len(out_fle, out_len);
15907 +
15908 +       return edesc;
15909 +}
15910 +
15911 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15912 +                                        bool encrypt)
15913 +{
15914 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
15915 +       unsigned int blocksize = crypto_aead_blocksize(tls);
15916 +       unsigned int padsize, authsize;
15917 +       struct caam_request *req_ctx = aead_request_ctx(req);
15918 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15919 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15920 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
15921 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15922 +                                                typeof(*alg), aead);
15923 +       struct device *dev = ctx->dev;
15924 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15925 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15926 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15927 +       struct tls_edesc *edesc;
15928 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15929 +       int ivsize = 0;
15930 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15931 +       int in_len, out_len;
15932 +       struct dpaa2_sg_entry *sg_table;
15933 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15934 +       struct scatterlist *dst;
15935 +
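+       /*
+        * TLS 1.0 CBC padding: pad the MAC'ed payload up to the next cipher
+        * block boundary, always adding at least one byte. E.g. with a
+        * 16-byte block and cryptlen + authsize == 40, padsize is 8.
+        * The pad bytes are accounted for as extra "authsize" output.
+        */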
15936 +       if (encrypt) {
15937 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15938 +                                       blocksize);
15939 +               authsize = ctx->authsize + padsize;
15940 +       } else {
15941 +               authsize = ctx->authsize;
15942 +       }
15943 +
15944 +       /* allocate space for base edesc and link tables */
15945 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15946 +       if (unlikely(!edesc)) {
15947 +               dev_err(dev, "could not allocate extended descriptor\n");
15948 +               return ERR_PTR(-ENOMEM);
15949 +       }
15950 +
15951 +       if (likely(req->src == req->dst)) {
15952 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15953 +                                            req->cryptlen +
15954 +                                            (encrypt ? authsize : 0));
15955 +               if (unlikely(src_nents < 0)) {
15956 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15957 +                               req->assoclen + req->cryptlen +
15958 +                               (encrypt ? authsize : 0));
15959 +                       qi_cache_free(edesc);
15960 +                       return ERR_PTR(src_nents);
15961 +               }
15962 +
15963 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15964 +                                             DMA_BIDIRECTIONAL);
15965 +               if (unlikely(!mapped_src_nents)) {
15966 +                       dev_err(dev, "unable to map source\n");
15967 +                       qi_cache_free(edesc);
15968 +                       return ERR_PTR(-ENOMEM);
15969 +               }
15970 +               dst = req->dst;
15971 +       } else {
15972 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15973 +                                            req->cryptlen);
15974 +               if (unlikely(src_nents < 0)) {
15975 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15976 +                               req->assoclen + req->cryptlen);
15977 +                       qi_cache_free(edesc);
15978 +                       return ERR_PTR(src_nents);
15979 +               }
15980 +
15981 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15982 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
15983 +                                            (encrypt ? authsize : 0));
15984 +               if (unlikely(dst_nents < 0)) {
15985 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15986 +                               req->cryptlen +
15987 +                               (encrypt ? authsize : 0));
15988 +                       qi_cache_free(edesc);
15989 +                       return ERR_PTR(dst_nents);
15990 +               }
15991 +
15992 +               if (src_nents) {
15993 +                       mapped_src_nents = dma_map_sg(dev, req->src,
15994 +                                                     src_nents, DMA_TO_DEVICE);
15995 +                       if (unlikely(!mapped_src_nents)) {
15996 +                               dev_err(dev, "unable to map source\n");
15997 +                               qi_cache_free(edesc);
15998 +                               return ERR_PTR(-ENOMEM);
15999 +                       }
16000 +               } else {
16001 +                       mapped_src_nents = 0;
16002 +               }
16003 +
16004 +               mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16005 +                                             DMA_FROM_DEVICE);
16006 +               if (unlikely(!mapped_dst_nents)) {
16007 +                       dev_err(dev, "unable to map destination\n");
16008 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16009 +                       qi_cache_free(edesc);
16010 +                       return ERR_PTR(-ENOMEM);
16011 +               }
16012 +       }
16013 +
16014 +       ivsize = crypto_aead_ivsize(tls);
16015 +       iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16016 +       if (dma_mapping_error(dev, iv_dma)) {
16017 +               dev_err(dev, "unable to map IV\n");
16018 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16019 +                          op_type, 0, 0);
16020 +               qi_cache_free(edesc);
16021 +               return ERR_PTR(-ENOMEM);
16022 +       }
16023 +
16024 +       /*
16025 +        * Create S/G table: IV, src, dst.
16026 +        * Input is not contiguous.
16027 +        */
16028 +       qm_sg_ents = 1 + mapped_src_nents +
16029 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16030 +       sg_table = &edesc->sgt[0];
16031 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16032 +
16033 +       edesc->src_nents = src_nents;
16034 +       edesc->dst_nents = dst_nents;
16035 +       edesc->dst = dst;
16036 +       edesc->iv_dma = iv_dma;
16037 +
16038 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16039 +       qm_sg_index = 1;
16040 +
16041 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16042 +       qm_sg_index += mapped_src_nents;
16043 +
16044 +       if (mapped_dst_nents > 1)
16045 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16046 +                                qm_sg_index, 0);
16047 +
16048 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16049 +       if (dma_mapping_error(dev, qm_sg_dma)) {
16050 +               dev_err(dev, "unable to map S/G table\n");
16051 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16052 +                          ivsize, op_type, 0, 0);
16053 +               qi_cache_free(edesc);
16054 +               return ERR_PTR(-ENOMEM);
16055 +       }
16056 +
16057 +       edesc->qm_sg_dma = qm_sg_dma;
16058 +       edesc->qm_sg_bytes = qm_sg_bytes;
16059 +
16060 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
16061 +       in_len = ivsize + req->assoclen + req->cryptlen;
16062 +
16063 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16064 +       dpaa2_fl_set_final(in_fle, true);
16065 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16066 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16067 +       dpaa2_fl_set_len(in_fle, in_len);
16068 +
16069 +       if (req->dst == req->src) {
16070 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16071 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16072 +                                 (sg_nents_for_len(req->src, req->assoclen) +
16073 +                                  1) * sizeof(*sg_table));
16074 +       } else if (mapped_dst_nents == 1) {
16075 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16076 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16077 +       } else {
16078 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16079 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16080 +                                 sizeof(*sg_table));
16081 +       }
16082 +
16083 +       dpaa2_fl_set_len(out_fle, out_len);
16084 +
16085 +       return edesc;
16086 +}
16087 +
16088 +static int tls_set_sh_desc(struct crypto_aead *tls)
16089 +{
16090 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16091 +       unsigned int ivsize = crypto_aead_ivsize(tls);
16092 +       unsigned int blocksize = crypto_aead_blocksize(tls);
16093 +       struct device *dev = ctx->dev;
16094 +       struct caam_flc *flc;
16095 +       u32 *desc;
16096 +       unsigned int assoclen = 13; /* always 13 for TLS: seq(8)+type(1)+ver(2)+len(2) */
16097 +       unsigned int data_len[2];
16098 +       u32 inl_mask;
16099 +
16100 +       if (!ctx->cdata.keylen || !ctx->authsize)
16101 +               return 0;
16102 +
16103 +       /*
16104 +        * TLS 1.0 encrypt shared descriptor
16105 +        * Job Descriptor and Shared Descriptor
16106 +        * must fit into the 64-word Descriptor h/w Buffer
16107 +        */
16108 +       data_len[0] = ctx->adata.keylen_pad;
16109 +       data_len[1] = ctx->cdata.keylen;
16110 +
16111 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16112 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
16113 +               return -EINVAL;
16114 +
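+       /*
+        * desc_inline_query() sets one bit per key in inl_mask: bit 0 for
+        * the (split) authentication key, bit 1 for the cipher key. A set
+        * bit means that key still fits inline in the shared descriptor;
+        * otherwise it must be referenced by DMA address.
+        */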
16115 +       if (inl_mask & 1)
16116 +               ctx->adata.key_virt = ctx->key;
16117 +       else
16118 +               ctx->adata.key_dma = ctx->key_dma;
16119 +
16120 +       if (inl_mask & 2)
16121 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16122 +       else
16123 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16124 +
16125 +       ctx->adata.key_inline = !!(inl_mask & 1);
16126 +       ctx->cdata.key_inline = !!(inl_mask & 2);
16127 +
16128 +       flc = &ctx->flc[ENCRYPT];
16129 +       desc = flc->sh_desc;
16130 +
16131 +       cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16132 +                             assoclen, ivsize, ctx->authsize, blocksize);
16133 +
16134 +       flc->flc[1] = desc_len(desc);
16135 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16136 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16137 +
16138 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16139 +               dev_err(dev, "unable to map shared descriptor\n");
16140 +               return -ENOMEM;
16141 +       }
16142 +
16143 +       /*
16144 +        * TLS 1.0 decrypt shared descriptor
16145 +        * The keys never fit inline, regardless of the algorithms used
16146 +        */
16147 +       ctx->adata.key_dma = ctx->key_dma;
16148 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16149 +
16150 +       flc = &ctx->flc[DECRYPT];
16151 +       desc = flc->sh_desc;
16152 +
16153 +       cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16154 +                             ctx->authsize, blocksize);
16155 +
16156 +       flc->flc[1] = desc_len(desc); /* SDL */
16157 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16158 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16159 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16160 +               dev_err(dev, "unable to map shared descriptor\n");
16161 +               return -ENOMEM;
16162 +       }
16163 +
16164 +       return 0;
16165 +}
16166 +
16167 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16168 +                     unsigned int keylen)
16169 +{
16170 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16171 +       struct device *dev = ctx->dev;
16172 +       struct crypto_authenc_keys keys;
16173 +       int ret;
16174 +
16175 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16176 +               goto badkey;
16177 +
16178 +#ifdef DEBUG
16179 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16180 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
16181 +               keys.authkeylen);
16182 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16183 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16184 +#endif
16185 +
16186 +       ctx->adata.keylen = split_key_len(ctx->adata.algtype &
16187 +                                         OP_ALG_ALGSEL_MASK);
16188 +       ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
16189 +                                                 OP_ALG_ALGSEL_MASK);
16190 +
16191 +#ifdef DEBUG
16192 +       dev_err(dev, "split keylen %d split keylen padded %d\n",
16193 +               ctx->adata.keylen, ctx->adata.keylen_pad);
16194 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16195 +                      DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
16196 +                      keys.authkeylen + keys.enckeylen, 1);
16197 +#endif
16198 +
16199 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16200 +               goto badkey;
16201 +
16202 +       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
16203 +       if (ret)
16204 +               goto badkey;
16205 +
16206 +       /* append the encryption key after the auth split key */
16207 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16208 +
16209 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
16210 +                                     keys.enckeylen, DMA_TO_DEVICE);
16211 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16212 +               dev_err(dev, "unable to map key i/o memory\n");
16213 +               return -ENOMEM;
16214 +       }
16215 +#ifdef DEBUG
16216 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16217 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16218 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
16219 +#endif
16220 +
16221 +       ctx->cdata.keylen = keys.enckeylen;
16222 +
16223 +       ret = tls_set_sh_desc(tls);
16224 +       if (ret)
16225 +               dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
16226 +                                keys.enckeylen, DMA_TO_DEVICE);
16227 +
16228 +       return ret;
16229 +badkey:
16230 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16231 +       return -EINVAL;
16232 +}
16233 +
16234 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16235 +{
16236 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16237 +
16238 +       ctx->authsize = authsize;
16239 +       tls_set_sh_desc(tls);
16240 +
16241 +       return 0;
16242 +}
16243 +
16244 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16245 +{
16246 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16247 +       struct device *dev = ctx->dev;
16248 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16249 +       struct caam_flc *flc;
16250 +       u32 *desc;
16251 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16252 +                       ctx->cdata.keylen;
16253 +
16254 +       if (!ctx->cdata.keylen || !ctx->authsize)
16255 +               return 0;
16256 +
16257 +       /*
16258 +        * AES GCM encrypt shared descriptor
16259 +        * Job Descriptor and Shared Descriptor
16260 +        * must fit into the 64-word Descriptor h/w Buffer
16261 +        */
16262 +       if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16263 +               ctx->cdata.key_inline = true;
16264 +               ctx->cdata.key_virt = ctx->key;
16265 +       } else {
16266 +               ctx->cdata.key_inline = false;
16267 +               ctx->cdata.key_dma = ctx->key_dma;
16268 +       }
16269 +
16270 +       flc = &ctx->flc[ENCRYPT];
16271 +       desc = flc->sh_desc;
16272 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16273 +
16274 +       flc->flc[1] = desc_len(desc); /* SDL */
16275 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16276 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16277 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16278 +               dev_err(dev, "unable to map shared descriptor\n");
16279 +               return -ENOMEM;
16280 +       }
16281 +
16282 +       /*
16283 +        * Job Descriptor and Shared Descriptors
16284 +        * must all fit into the 64-word Descriptor h/w Buffer
16285 +        */
16286 +       if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16287 +               ctx->cdata.key_inline = true;
16288 +               ctx->cdata.key_virt = ctx->key;
16289 +       } else {
16290 +               ctx->cdata.key_inline = false;
16291 +               ctx->cdata.key_dma = ctx->key_dma;
16292 +       }
16293 +
16294 +       flc = &ctx->flc[DECRYPT];
16295 +       desc = flc->sh_desc;
16296 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16297 +
16298 +       flc->flc[1] = desc_len(desc); /* SDL */
16299 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16300 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16301 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16302 +               dev_err(dev, "unable to map shared descriptor\n");
16303 +               return -ENOMEM;
16304 +       }
16305 +
16306 +       return 0;
16307 +}
16308 +
16309 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16310 +{
16311 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16312 +
16313 +       ctx->authsize = authsize;
16314 +       gcm_set_sh_desc(authenc);
16315 +
16316 +       return 0;
16317 +}
16318 +
16319 +static int gcm_setkey(struct crypto_aead *aead,
16320 +                     const u8 *key, unsigned int keylen)
16321 +{
16322 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16323 +       struct device *dev = ctx->dev;
16324 +       int ret;
16325 +
16326 +#ifdef DEBUG
16327 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16328 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16329 +#endif
16330 +
16331 +       memcpy(ctx->key, key, keylen);
16332 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16333 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16334 +               dev_err(dev, "unable to map key i/o memory\n");
16335 +               return -ENOMEM;
16336 +       }
16337 +       ctx->cdata.keylen = keylen;
16338 +
16339 +       ret = gcm_set_sh_desc(aead);
16340 +       if (ret)
16341 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16342 +                                DMA_TO_DEVICE);
16343 +
16344 +       return ret;
16345 +}
16346 +
16347 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16348 +{
16349 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16350 +       struct device *dev = ctx->dev;
16351 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16352 +       struct caam_flc *flc;
16353 +       u32 *desc;
16354 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16355 +                       ctx->cdata.keylen;
16356 +
16357 +       if (!ctx->cdata.keylen || !ctx->authsize)
16358 +               return 0;
16359 +
16360 +       ctx->cdata.key_virt = ctx->key;
16361 +
16362 +       /*
16363 +        * RFC4106 encrypt shared descriptor
16364 +        * Job Descriptor and Shared Descriptor
16365 +        * must fit into the 64-word Descriptor h/w Buffer
16366 +        */
16367 +       if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16368 +               ctx->cdata.key_inline = true;
16369 +       } else {
16370 +               ctx->cdata.key_inline = false;
16371 +               ctx->cdata.key_dma = ctx->key_dma;
16372 +       }
16373 +
16374 +       flc = &ctx->flc[ENCRYPT];
16375 +       desc = flc->sh_desc;
16376 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16377 +                                 true);
16378 +
16379 +       flc->flc[1] = desc_len(desc); /* SDL */
16380 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16381 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16382 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16383 +               dev_err(dev, "unable to map shared descriptor\n");
16384 +               return -ENOMEM;
16385 +       }
16386 +
16387 +       /*
16388 +        * Job Descriptor and Shared Descriptors
16389 +        * must all fit into the 64-word Descriptor h/w Buffer
16390 +        */
16391 +       if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16392 +               ctx->cdata.key_inline = true;
16393 +       } else {
16394 +               ctx->cdata.key_inline = false;
16395 +               ctx->cdata.key_dma = ctx->key_dma;
16396 +       }
16397 +
16398 +       flc = &ctx->flc[DECRYPT];
16399 +       desc = flc->sh_desc;
16400 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16401 +                                 true);
16402 +
16403 +       flc->flc[1] = desc_len(desc); /* SDL */
16404 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16405 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16406 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16407 +               dev_err(dev, "unable to map shared descriptor\n");
16408 +               return -ENOMEM;
16409 +       }
16410 +
16411 +       return 0;
16412 +}
16413 +
16414 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16415 +                              unsigned int authsize)
16416 +{
16417 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16418 +
16419 +       ctx->authsize = authsize;
16420 +       rfc4106_set_sh_desc(authenc);
16421 +
16422 +       return 0;
16423 +}
16424 +
16425 +static int rfc4106_setkey(struct crypto_aead *aead,
16426 +                         const u8 *key, unsigned int keylen)
16427 +{
16428 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16429 +       struct device *dev = ctx->dev;
16430 +       int ret;
16431 +
16432 +       if (keylen < 4)
16433 +               return -EINVAL;
16434 +
16435 +#ifdef DEBUG
16436 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16437 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16438 +#endif
16439 +
16440 +       memcpy(ctx->key, key, keylen);
16441 +       /*
16442 +        * The last four bytes of the key material are used as the salt value
16443 +        * in the nonce. Update the AES key length.
16444 +        */
16445 +       ctx->cdata.keylen = keylen - 4;
16446 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16447 +                                     DMA_TO_DEVICE);
16448 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16449 +               dev_err(dev, "unable to map key i/o memory\n");
16450 +               return -ENOMEM;
16451 +       }
16452 +
16453 +       ret = rfc4106_set_sh_desc(aead);
16454 +       if (ret)
16455 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16456 +                                DMA_TO_DEVICE);
16457 +
16458 +       return ret;
16459 +}
16460 +
16461 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16462 +{
16463 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16464 +       struct device *dev = ctx->dev;
16465 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16466 +       struct caam_flc *flc;
16467 +       u32 *desc;
16468 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16469 +                       ctx->cdata.keylen;
16470 +
16471 +       if (!ctx->cdata.keylen || !ctx->authsize)
16472 +               return 0;
16473 +
16474 +       ctx->cdata.key_virt = ctx->key;
16475 +
16476 +       /*
16477 +        * RFC4543 encrypt shared descriptor
16478 +        * Job Descriptor and Shared Descriptor
16479 +        * must fit into the 64-word Descriptor h/w Buffer
16480 +        */
16481 +       if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16482 +               ctx->cdata.key_inline = true;
16483 +       } else {
16484 +               ctx->cdata.key_inline = false;
16485 +               ctx->cdata.key_dma = ctx->key_dma;
16486 +       }
16487 +
16488 +       flc = &ctx->flc[ENCRYPT];
16489 +       desc = flc->sh_desc;
16490 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16491 +                                 true);
16492 +
16493 +       flc->flc[1] = desc_len(desc); /* SDL */
16494 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16495 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16496 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16497 +               dev_err(dev, "unable to map shared descriptor\n");
16498 +               return -ENOMEM;
16499 +       }
16500 +
16501 +       /*
16502 +        * Job Descriptor and Shared Descriptors
16503 +        * must all fit into the 64-word Descriptor h/w Buffer
16504 +        */
16505 +       if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16506 +               ctx->cdata.key_inline = true;
16507 +       } else {
16508 +               ctx->cdata.key_inline = false;
16509 +               ctx->cdata.key_dma = ctx->key_dma;
16510 +       }
16511 +
16512 +       flc = &ctx->flc[DECRYPT];
16513 +       desc = flc->sh_desc;
16514 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16515 +                                 true);
16516 +
16517 +       flc->flc[1] = desc_len(desc); /* SDL */
16518 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16519 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16520 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16521 +               dev_err(dev, "unable to map shared descriptor\n");
16522 +               return -ENOMEM;
16523 +       }
16524 +
16525 +       return 0;
16526 +}
16527 +
16528 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16529 +                              unsigned int authsize)
16530 +{
16531 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16532 +
16533 +       ctx->authsize = authsize;
16534 +       rfc4543_set_sh_desc(authenc);
16535 +
16536 +       return 0;
16537 +}
16538 +
16539 +static int rfc4543_setkey(struct crypto_aead *aead,
16540 +                         const u8 *key, unsigned int keylen)
16541 +{
16542 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16543 +       struct device *dev = ctx->dev;
16544 +       int ret;
16545 +
16546 +       if (keylen < 4)
16547 +               return -EINVAL;
16548 +
16549 +#ifdef DEBUG
16550 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16551 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16552 +#endif
16553 +
16554 +       memcpy(ctx->key, key, keylen);
16555 +       /*
16556 +        * The last four bytes of the key material are used as the salt value
16557 +        * in the nonce. Update the AES key length.
16558 +        */
16559 +       ctx->cdata.keylen = keylen - 4;
16560 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16561 +                                     DMA_TO_DEVICE);
16562 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16563 +               dev_err(dev, "unable to map key i/o memory\n");
16564 +               return -ENOMEM;
16565 +       }
16566 +
16567 +       ret = rfc4543_set_sh_desc(aead);
16568 +       if (ret)
16569 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16570 +                                DMA_TO_DEVICE);
16571 +
16572 +       return ret;
16573 +}
16574 +
16575 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16576 +                            const u8 *key, unsigned int keylen)
16577 +{
16578 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16579 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16580 +       const char *alg_name = crypto_tfm_alg_name(tfm);
16581 +       struct device *dev = ctx->dev;
16582 +       struct caam_flc *flc;
16583 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16584 +       u32 *desc;
16585 +       u32 ctx1_iv_off = 0;
16586 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16587 +                              OP_ALG_AAI_CTR_MOD128);
16588 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16589 +
16590 +       memcpy(ctx->key, key, keylen);
16591 +#ifdef DEBUG
16592 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16593 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16594 +#endif
16595 +       /*
16596 +        * AES-CTR needs to load the IV into the CONTEXT1 register
16597 +        * at an offset of 128 bits (16 bytes):
16598 +        * CONTEXT1[255:128] = IV
16599 +        */
16600 +       if (ctr_mode)
16601 +               ctx1_iv_off = 16;
16602 +
16603 +       /*
16604 +        * RFC3686 specific:
16605 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16606 +        *      | *key = {KEY, NONCE}
16607 +        */
16608 +       if (is_rfc3686) {
16609 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16610 +               keylen -= CTR_RFC3686_NONCE_SIZE;
16611 +       }
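+       /*
+        * E.g. for rfc3686(ctr(aes)) with a 16-byte AES key, the caller
+        * passes 20 bytes: the AES key followed by the 4-byte nonce. Only
+        * the AES key part is treated as key material from here on.
+        */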
16612 +
16613 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16614 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16615 +               dev_err(dev, "unable to map key i/o memory\n");
16616 +               return -ENOMEM;
16617 +       }
16618 +       ctx->cdata.keylen = keylen;
16619 +       ctx->cdata.key_virt = ctx->key;
16620 +       ctx->cdata.key_inline = true;
16621 +
16622 +       /* ablkcipher_encrypt shared descriptor */
16623 +       flc = &ctx->flc[ENCRYPT];
16624 +       desc = flc->sh_desc;
16625 +
16626 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16627 +                                    is_rfc3686, ctx1_iv_off);
16628 +
16629 +       flc->flc[1] = desc_len(desc); /* SDL */
16630 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16631 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16632 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16633 +               dev_err(dev, "unable to map shared descriptor\n");
16634 +               return -ENOMEM;
16635 +       }
16636 +
16637 +       /* ablkcipher_decrypt shared descriptor */
16638 +       flc = &ctx->flc[DECRYPT];
16639 +       desc = flc->sh_desc;
16640 +
16641 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16642 +                                    is_rfc3686, ctx1_iv_off);
16643 +
16644 +       flc->flc[1] = desc_len(desc); /* SDL */
16645 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16646 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16647 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16648 +               dev_err(dev, "unable to map shared descriptor\n");
16649 +               return -ENOMEM;
16650 +       }
16651 +
16652 +       /* ablkcipher_givencrypt shared descriptor */
16653 +       flc = &ctx->flc[GIVENCRYPT];
16654 +       desc = flc->sh_desc;
16655 +
16656 +       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16657 +                                       ivsize, is_rfc3686, ctx1_iv_off);
16658 +
16659 +       flc->flc[1] = desc_len(desc); /* SDL */
16660 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16661 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16662 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16663 +               dev_err(dev, "unable to map shared descriptor\n");
16664 +               return -ENOMEM;
16665 +       }
16666 +
16667 +       return 0;
16668 +}
16669 +
16670 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16671 +                                const u8 *key, unsigned int keylen)
16672 +{
16673 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16674 +       struct device *dev = ctx->dev;
16675 +       struct caam_flc *flc;
16676 +       u32 *desc;
16677 +
16678 +       if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16679 +               dev_err(dev, "key size mismatch\n");
16680 +               crypto_ablkcipher_set_flags(ablkcipher,
16681 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
16682 +               return -EINVAL;
16683 +       }
16684 +
16685 +       memcpy(ctx->key, key, keylen);
16686 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16687 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16688 +               dev_err(dev, "unable to map key i/o memory\n");
16689 +               return -ENOMEM;
16690 +       }
16691 +       ctx->cdata.keylen = keylen;
16692 +       ctx->cdata.key_virt = ctx->key;
16693 +       ctx->cdata.key_inline = true;
16694 +
16695 +       /* xts_ablkcipher_encrypt shared descriptor */
16696 +       flc = &ctx->flc[ENCRYPT];
16697 +       desc = flc->sh_desc;
16698 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16699 +
16700 +       flc->flc[1] = desc_len(desc); /* SDL */
16701 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16702 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16703 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16704 +               dev_err(dev, "unable to map shared descriptor\n");
16705 +               return -ENOMEM;
16706 +       }
16707 +
16708 +       /* xts_ablkcipher_decrypt shared descriptor */
16709 +       flc = &ctx->flc[DECRYPT];
16710 +       desc = flc->sh_desc;
16711 +
16712 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16713 +
16714 +       flc->flc[1] = desc_len(desc); /* SDL */
16715 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16716 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16717 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16718 +               dev_err(dev, "unable to map shared descriptor\n");
16719 +               return -ENOMEM;
16720 +       }
16721 +
16722 +       return 0;
16723 +}
16724 +
16725 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16726 +                                                      *req, bool encrypt)
16727 +{
16728 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16729 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16730 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16731 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16732 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16733 +       struct device *dev = ctx->dev;
16734 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16735 +                      GFP_KERNEL : GFP_ATOMIC;
16736 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16737 +       struct ablkcipher_edesc *edesc;
16738 +       dma_addr_t iv_dma;
16739 +       bool in_contig;
16740 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16741 +       int dst_sg_idx, qm_sg_ents;
16742 +       struct dpaa2_sg_entry *sg_table;
16743 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16744 +
16745 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16746 +       if (unlikely(src_nents < 0)) {
16747 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16748 +                       req->nbytes);
16749 +               return ERR_PTR(src_nents);
16750 +       }
16751 +
16752 +       if (unlikely(req->dst != req->src)) {
16753 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16754 +               if (unlikely(dst_nents < 0)) {
16755 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16756 +                               req->nbytes);
16757 +                       return ERR_PTR(dst_nents);
16758 +               }
16759 +
16760 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16761 +                                             DMA_TO_DEVICE);
16762 +               if (unlikely(!mapped_src_nents)) {
16763 +                       dev_err(dev, "unable to map source\n");
16764 +                       return ERR_PTR(-ENOMEM);
16765 +               }
16766 +
16767 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16768 +                                             DMA_FROM_DEVICE);
16769 +               if (unlikely(!mapped_dst_nents)) {
16770 +                       dev_err(dev, "unable to map destination\n");
16771 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16772 +                       return ERR_PTR(-ENOMEM);
16773 +               }
16774 +       } else {
16775 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16776 +                                             DMA_BIDIRECTIONAL);
16777 +               if (unlikely(!mapped_src_nents)) {
16778 +                       dev_err(dev, "unable to map source\n");
16779 +                       return ERR_PTR(-ENOMEM);
16780 +               }
16781 +       }
16782 +
16783 +       iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16784 +       if (dma_mapping_error(dev, iv_dma)) {
16785 +               dev_err(dev, "unable to map IV\n");
16786 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16787 +                          0, 0, 0, 0);
16788 +               return ERR_PTR(-ENOMEM);
16789 +       }
16790 +
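+       /*
+        * If the IV buffer happens to sit immediately before a single
+        * mapped source segment in DMA address space, the input is already
+        * contiguous and no input S/G table entries are needed.
+        */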
16791 +       if (mapped_src_nents == 1 &&
16792 +           iv_dma + ivsize == sg_dma_address(req->src)) {
16793 +               in_contig = true;
16794 +               qm_sg_ents = 0;
16795 +       } else {
16796 +               in_contig = false;
16797 +               qm_sg_ents = 1 + mapped_src_nents;
16798 +       }
16799 +       dst_sg_idx = qm_sg_ents;
16800 +
16801 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16802 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16803 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16804 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16805 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16806 +                          iv_dma, ivsize, op_type, 0, 0);
16807 +               return ERR_PTR(-ENOMEM);
16808 +       }
16809 +
16810 +       /* allocate space for base edesc and link tables */
16811 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16812 +       if (unlikely(!edesc)) {
16813 +               dev_err(dev, "could not allocate extended descriptor\n");
16814 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16815 +                          iv_dma, ivsize, op_type, 0, 0);
16816 +               return ERR_PTR(-ENOMEM);
16817 +       }
16818 +
16819 +       edesc->src_nents = src_nents;
16820 +       edesc->dst_nents = dst_nents;
16821 +       edesc->iv_dma = iv_dma;
16822 +       sg_table = &edesc->sgt[0];
16823 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16824 +
16825 +       if (!in_contig) {
16826 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16827 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16828 +       }
16829 +
16830 +       if (mapped_dst_nents > 1)
16831 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16832 +                                dst_sg_idx, 0);
16833 +
16834 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16835 +                                         DMA_TO_DEVICE);
16836 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16837 +               dev_err(dev, "unable to map S/G table\n");
16838 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16839 +                          iv_dma, ivsize, op_type, 0, 0);
16840 +               qi_cache_free(edesc);
16841 +               return ERR_PTR(-ENOMEM);
16842 +       }
16843 +
16844 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16845 +       dpaa2_fl_set_final(in_fle, true);
16846 +       dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16847 +       dpaa2_fl_set_len(out_fle, req->nbytes);
16848 +
16849 +       if (!in_contig) {
16850 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16851 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16852 +       } else {
16853 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16854 +               dpaa2_fl_set_addr(in_fle, iv_dma);
16855 +       }
16856 +
16857 +       if (req->src == req->dst) {
16858 +               if (!in_contig) {
16859 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16860 +                       dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16861 +                                         sizeof(*sg_table));
16862 +               } else {
16863 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16864 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16865 +               }
16866 +       } else if (mapped_dst_nents > 1) {
16867 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16868 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16869 +                                 sizeof(*sg_table));
16870 +       } else {
16871 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16872 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16873 +       }
16874 +
16875 +       return edesc;
16876 +}
16877 +
16878 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16879 +       struct skcipher_givcrypt_request *greq)
16880 +{
16881 +       struct ablkcipher_request *req = &greq->creq;
16882 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16883 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16884 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16885 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16886 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16887 +       struct device *dev = ctx->dev;
16888 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16889 +                      GFP_KERNEL : GFP_ATOMIC;
16890 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16891 +       struct ablkcipher_edesc *edesc;
16892 +       dma_addr_t iv_dma;
16893 +       bool out_contig;
16894 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16895 +       struct dpaa2_sg_entry *sg_table;
16896 +       int dst_sg_idx, qm_sg_ents;
16897 +
16898 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16899 +       if (unlikely(src_nents < 0)) {
16900 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16901 +                       req->nbytes);
16902 +               return ERR_PTR(src_nents);
16903 +       }
16904 +
16905 +       if (unlikely(req->dst != req->src)) {
16906 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16907 +               if (unlikely(dst_nents < 0)) {
16908 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16909 +                               req->nbytes);
16910 +                       return ERR_PTR(dst_nents);
16911 +               }
16912 +
16913 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16914 +                                             DMA_TO_DEVICE);
16915 +               if (unlikely(!mapped_src_nents)) {
16916 +                       dev_err(dev, "unable to map source\n");
16917 +                       return ERR_PTR(-ENOMEM);
16918 +               }
16919 +
16920 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16921 +                                             DMA_FROM_DEVICE);
16922 +               if (unlikely(!mapped_dst_nents)) {
16923 +                       dev_err(dev, "unable to map destination\n");
16924 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16925 +                       return ERR_PTR(-ENOMEM);
16926 +               }
16927 +       } else {
16928 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16929 +                                             DMA_BIDIRECTIONAL);
16930 +               if (unlikely(!mapped_src_nents)) {
16931 +                       dev_err(dev, "unable to map source\n");
16932 +                       return ERR_PTR(-ENOMEM);
16933 +               }
16934 +
16935 +               dst_nents = src_nents;
16936 +               mapped_dst_nents = src_nents;
16937 +       }
16938 +
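+       /*
+        * For givencrypt the IV is produced by the accelerator, so the
+        * IV buffer is mapped as device output (DMA_FROM_DEVICE) rather
+        * than as input.
+        */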
16939 +       iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16940 +       if (dma_mapping_error(dev, iv_dma)) {
16941 +               dev_err(dev, "unable to map IV\n");
16942 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16943 +                          0, 0, 0, 0);
16944 +               return ERR_PTR(-ENOMEM);
16945 +       }
16946 +
16947 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16948 +       dst_sg_idx = qm_sg_ents;
16949 +       if (mapped_dst_nents == 1 &&
16950 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
16951 +               out_contig = true;
16952 +       } else {
16953 +               out_contig = false;
16954 +               qm_sg_ents += 1 + mapped_dst_nents;
16955 +       }
16956 +
16957 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16958 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16959 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16960 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16961 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16962 +               return ERR_PTR(-ENOMEM);
16963 +       }
16964 +
16965 +       /* allocate space for base edesc and link tables */
16966 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16967 +       if (!edesc) {
16968 +               dev_err(dev, "could not allocate extended descriptor\n");
16969 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16970 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16971 +               return ERR_PTR(-ENOMEM);
16972 +       }
16973 +
16974 +       edesc->src_nents = src_nents;
16975 +       edesc->dst_nents = dst_nents;
16976 +       edesc->iv_dma = iv_dma;
16977 +       sg_table = &edesc->sgt[0];
16978 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16979 +
16980 +       if (mapped_src_nents > 1)
16981 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16982 +
16983 +       if (!out_contig) {
16984 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16985 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16986 +                                dst_sg_idx + 1, 0);
16987 +       }
16988 +
16989 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16990 +                                         DMA_TO_DEVICE);
16991 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16992 +               dev_err(dev, "unable to map S/G table\n");
16993 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16994 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16995 +               qi_cache_free(edesc);
16996 +               return ERR_PTR(-ENOMEM);
16997 +       }
16998 +
16999 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
17000 +       dpaa2_fl_set_final(in_fle, true);
17001 +       dpaa2_fl_set_len(in_fle, req->nbytes);
17002 +       dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
17003 +
17004 +       if (mapped_src_nents > 1) {
17005 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
17006 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
17007 +       } else {
17008 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
17009 +               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
17010 +       }
17011 +
17012 +       if (!out_contig) {
17013 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
17014 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
17015 +                                 sizeof(*sg_table));
17016 +       } else {
17017 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
17018 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
17019 +       }
17020 +
17021 +       return edesc;
17022 +}
17023 +
17024 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
17025 +                      struct aead_request *req)
17026 +{
17027 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17028 +       int ivsize = crypto_aead_ivsize(aead);
17029 +       struct caam_request *caam_req = aead_request_ctx(req);
17030 +
17031 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17032 +                  edesc->iv_dma, ivsize, caam_req->op_type,
17033 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
17034 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
17035 +}
17036 +
17037 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
17038 +                     struct aead_request *req)
17039 +{
17040 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17041 +       int ivsize = crypto_aead_ivsize(tls);
17042 +       struct caam_request *caam_req = aead_request_ctx(req);
17043 +
17044 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
17045 +                  edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
17046 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
17047 +}
17048 +
17049 +static void ablkcipher_unmap(struct device *dev,
17050 +                            struct ablkcipher_edesc *edesc,
17051 +                            struct ablkcipher_request *req)
17052 +{
17053 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17054 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17055 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17056 +
17057 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17058 +                  edesc->iv_dma, ivsize, caam_req->op_type,
17059 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
17060 +}
17061 +
17062 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17063 +{
17064 +       struct crypto_async_request *areq = cbk_ctx;
17065 +       struct aead_request *req = container_of(areq, struct aead_request,
17066 +                                               base);
17067 +       struct caam_request *req_ctx = to_caam_req(areq);
17068 +       struct aead_edesc *edesc = req_ctx->edesc;
17069 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17070 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17071 +       int ecode = 0;
17072 +
17073 +#ifdef DEBUG
17074 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17075 +#endif
17076 +
17077 +       if (unlikely(status)) {
17078 +               caam_qi2_strstatus(ctx->dev, status);
17079 +               ecode = -EIO;
17080 +       }
17081 +
17082 +       aead_unmap(ctx->dev, edesc, req);
17083 +       qi_cache_free(edesc);
17084 +       aead_request_complete(req, ecode);
17085 +}
17086 +
17087 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17088 +{
17089 +       struct crypto_async_request *areq = cbk_ctx;
17090 +       struct aead_request *req = container_of(areq, struct aead_request,
17091 +                                               base);
17092 +       struct caam_request *req_ctx = to_caam_req(areq);
17093 +       struct aead_edesc *edesc = req_ctx->edesc;
17094 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17095 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17096 +       int ecode = 0;
17097 +
17098 +#ifdef DEBUG
17099 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17100 +#endif
17101 +
17102 +       if (unlikely(status)) {
17103 +               caam_qi2_strstatus(ctx->dev, status);
17104 +               /*
17105 +                * If the hardware ICV check failed, return -EBADMSG; else -EIO
17106 +                */
17107 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17108 +                    JRSTA_CCBERR_ERRID_ICVCHK)
17109 +                       ecode = -EBADMSG;
17110 +               else
17111 +                       ecode = -EIO;
17112 +       }
17113 +
17114 +       aead_unmap(ctx->dev, edesc, req);
17115 +       qi_cache_free(edesc);
17116 +       aead_request_complete(req, ecode);
17117 +}
17118 +
17119 +static int aead_encrypt(struct aead_request *req)
17120 +{
17121 +       struct aead_edesc *edesc;
17122 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17123 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17124 +       struct caam_request *caam_req = aead_request_ctx(req);
17125 +       int ret;
17126 +
17127 +       /* allocate extended descriptor */
17128 +       edesc = aead_edesc_alloc(req, true);
17129 +       if (IS_ERR(edesc))
17130 +               return PTR_ERR(edesc);
17131 +
17132 +       caam_req->flc = &ctx->flc[ENCRYPT];
17133 +       caam_req->op_type = ENCRYPT;
17134 +       caam_req->cbk = aead_encrypt_done;
17135 +       caam_req->ctx = &req->base;
17136 +       caam_req->edesc = edesc;
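+       /*
+        * -EINPROGRESS means the request was accepted; -EBUSY together with
+        * CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged and will still
+        * complete asynchronously. In both cases the completion callback
+        * frees the resources; any other return is a failure, so clean up
+        * here.
+        */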
17137 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17138 +       if (ret != -EINPROGRESS &&
17139 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17140 +               aead_unmap(ctx->dev, edesc, req);
17141 +               qi_cache_free(edesc);
17142 +       }
17143 +
17144 +       return ret;
17145 +}
17146 +
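+/* Build an extended descriptor and enqueue the AEAD decryption job */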
17147 +static int aead_decrypt(struct aead_request *req)
17148 +{
17149 +       struct aead_edesc *edesc;
17150 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17151 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17152 +       struct caam_request *caam_req = aead_request_ctx(req);
17153 +       int ret;
17154 +
17155 +       /* allocate extended descriptor */
17156 +       edesc = aead_edesc_alloc(req, false);
17157 +       if (IS_ERR(edesc))
17158 +               return PTR_ERR(edesc);
17159 +
17160 +       caam_req->flc = &ctx->flc[DECRYPT];
17161 +       caam_req->op_type = DECRYPT;
17162 +       caam_req->cbk = aead_decrypt_done;
17163 +       caam_req->ctx = &req->base;
17164 +       caam_req->edesc = edesc;
17165 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17166 +       if (ret != -EINPROGRESS &&
17167 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17168 +               aead_unmap(ctx->dev, edesc, req);
17169 +               qi_cache_free(edesc);
17170 +       }
17171 +
17172 +       return ret;
17173 +}
17174 +
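+/* Completion callback for TLS record encryption */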
17175 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17176 +{
17177 +       struct crypto_async_request *areq = cbk_ctx;
17178 +       struct aead_request *req = container_of(areq, struct aead_request,
17179 +                                               base);
17180 +       struct caam_request *req_ctx = to_caam_req(areq);
17181 +       struct tls_edesc *edesc = req_ctx->edesc;
17182 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17183 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17184 +       int ecode = 0;
17185 +
17186 +#ifdef DEBUG
17187 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17188 +#endif
17189 +
17190 +       if (unlikely(status)) {
17191 +               caam_qi2_strstatus(ctx->dev, status);
17192 +               ecode = -EIO;
17193 +       }
17194 +
17195 +       tls_unmap(ctx->dev, edesc, req);
17196 +       qi_cache_free(edesc);
17197 +       aead_request_complete(req, ecode);
17198 +}
17199 +
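+/* TLS record decryption callback; ICV failure maps to -EBADMSG */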
17200 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17201 +{
17202 +       struct crypto_async_request *areq = cbk_ctx;
17203 +       struct aead_request *req = container_of(areq, struct aead_request,
17204 +                                               base);
17205 +       struct caam_request *req_ctx = to_caam_req(areq);
17206 +       struct tls_edesc *edesc = req_ctx->edesc;
17207 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17208 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17209 +       int ecode = 0;
17210 +
17211 +#ifdef DEBUG
17212 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17213 +#endif
17214 +
17215 +       if (unlikely(status)) {
17216 +               caam_qi2_strstatus(ctx->dev, status);
17217 +               /*
17218 +                * verify the hw ICV (integrity) check passed, else return -EBADMSG
17219 +                */
17220 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17221 +                    JRSTA_CCBERR_ERRID_ICVCHK)
17222 +                       ecode = -EBADMSG;
17223 +               else
17224 +                       ecode = -EIO;
17225 +       }
17226 +
17227 +       tls_unmap(ctx->dev, edesc, req);
17228 +       qi_cache_free(edesc);
17229 +       aead_request_complete(req, ecode);
17230 +}
17231 +
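+/* Build an extended descriptor and enqueue the TLS record encryption job */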
17232 +static int tls_encrypt(struct aead_request *req)
17233 +{
17234 +       struct tls_edesc *edesc;
17235 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17236 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17237 +       struct caam_request *caam_req = aead_request_ctx(req);
17238 +       int ret;
17239 +
17240 +       /* allocate extended descriptor */
17241 +       edesc = tls_edesc_alloc(req, true);
17242 +       if (IS_ERR(edesc))
17243 +               return PTR_ERR(edesc);
17244 +
17245 +       caam_req->flc = &ctx->flc[ENCRYPT];
17246 +       caam_req->op_type = ENCRYPT;
17247 +       caam_req->cbk = tls_encrypt_done;
17248 +       caam_req->ctx = &req->base;
17249 +       caam_req->edesc = edesc;
17250 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17251 +       if (ret != -EINPROGRESS &&
17252 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17253 +               tls_unmap(ctx->dev, edesc, req);
17254 +               qi_cache_free(edesc);
17255 +       }
17256 +
17257 +       return ret;
17258 +}
17259 +
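+/* Build an extended descriptor and enqueue the TLS record decryption job */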
17260 +static int tls_decrypt(struct aead_request *req)
17261 +{
17262 +       struct tls_edesc *edesc;
17263 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17264 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17265 +       struct caam_request *caam_req = aead_request_ctx(req);
17266 +       int ret;
17267 +
17268 +       /* allocate extended descriptor */
17269 +       edesc = tls_edesc_alloc(req, false);
17270 +       if (IS_ERR(edesc))
17271 +               return PTR_ERR(edesc);
17272 +
17273 +       caam_req->flc = &ctx->flc[DECRYPT];
17274 +       caam_req->op_type = DECRYPT;
17275 +       caam_req->cbk = tls_decrypt_done;
17276 +       caam_req->ctx = &req->base;
17277 +       caam_req->edesc = edesc;
17278 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17279 +       if (ret != -EINPROGRESS &&
17280 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17281 +               tls_unmap(ctx->dev, edesc, req);
17282 +               qi_cache_free(edesc);
17283 +       }
17284 +
17285 +       return ret;
17286 +}
17287 +
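+/*
+ * The kernel's rfc4106/rfc4543 convention appends the 8-byte IV to the
+ * associated data, so reject requests too short to contain it.
+ */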
17288 +static int ipsec_gcm_encrypt(struct aead_request *req)
17289 +{
17290 +       if (req->assoclen < 8)
17291 +               return -EINVAL;
17292 +
17293 +       return aead_encrypt(req);
17294 +}
17295 +
17296 +static int ipsec_gcm_decrypt(struct aead_request *req)
17297 +{
17298 +       if (req->assoclen < 8)
17299 +               return -EINVAL;
17300 +
17301 +       return aead_decrypt(req);
17302 +}
17303 +
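+/* Common completion callback for all ablkcipher operations */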
17304 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17305 +{
17306 +       struct crypto_async_request *areq = cbk_ctx;
17307 +       struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17308 +       struct caam_request *req_ctx = to_caam_req(areq);
17309 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17310 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17311 +       struct ablkcipher_edesc *edesc = req_ctx->edesc;
17312 +       int ecode = 0;
17313 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17314 +
17315 +#ifdef DEBUG
17316 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17317 +#endif
17318 +
17319 +       if (unlikely(status)) {
17320 +               caam_qi2_strstatus(ctx->dev, status);
17321 +               ecode = -EIO;
17322 +       }
17323 +
17324 +#ifdef DEBUG
17325 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
17326 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17327 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
17328 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
17329 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17330 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17331 +#endif
17332 +
17333 +       ablkcipher_unmap(ctx->dev, edesc, req);
17334 +       qi_cache_free(edesc);
17335 +
17336 +       /*
17337 +        * The crypto API expects us to set the IV (req->info) to the last
17338 +        * ciphertext block. This is used e.g. by the CTS mode.
17339 +        */
17340 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17341 +                                ivsize, 0);
17342 +
17343 +       ablkcipher_request_complete(req, ecode);
17344 +}
17345 +
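+/* Enqueue an ablkcipher encryption job using the ENCRYPT flow context */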
17346 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17347 +{
17348 +       struct ablkcipher_edesc *edesc;
17349 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17350 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17351 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17352 +       int ret;
17353 +
17354 +       /* allocate extended descriptor */
17355 +       edesc = ablkcipher_edesc_alloc(req, true);
17356 +       if (IS_ERR(edesc))
17357 +               return PTR_ERR(edesc);
17358 +
17359 +       caam_req->flc = &ctx->flc[ENCRYPT];
17360 +       caam_req->op_type = ENCRYPT;
17361 +       caam_req->cbk = ablkcipher_done;
17362 +       caam_req->ctx = &req->base;
17363 +       caam_req->edesc = edesc;
17364 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17365 +       if (ret != -EINPROGRESS &&
17366 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17367 +               ablkcipher_unmap(ctx->dev, edesc, req);
17368 +               qi_cache_free(edesc);
17369 +       }
17370 +
17371 +       return ret;
17372 +}
17373 +
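+/* Encrypt while also generating the IV, using the GIVENCRYPT flow context */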
17374 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17375 +{
17376 +       struct ablkcipher_request *req = &greq->creq;
17377 +       struct ablkcipher_edesc *edesc;
17378 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17379 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17380 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17381 +       int ret;
17382 +
17383 +       /* allocate extended descriptor */
17384 +       edesc = ablkcipher_giv_edesc_alloc(greq);
17385 +       if (IS_ERR(edesc))
17386 +               return PTR_ERR(edesc);
17387 +
17388 +       caam_req->flc = &ctx->flc[GIVENCRYPT];
17389 +       caam_req->op_type = GIVENCRYPT;
17390 +       caam_req->cbk = ablkcipher_done;
17391 +       caam_req->ctx = &req->base;
17392 +       caam_req->edesc = edesc;
17393 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17394 +       if (ret != -EINPROGRESS &&
17395 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17396 +               ablkcipher_unmap(ctx->dev, edesc, req);
17397 +               qi_cache_free(edesc);
17398 +       }
17399 +
17400 +       return ret;
17401 +}
17402 +
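+/* Enqueue an ablkcipher decryption job using the DECRYPT flow context */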
17403 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17404 +{
17405 +       struct ablkcipher_edesc *edesc;
17406 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17407 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17408 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17409 +       int ret;
17410 +
17411 +       /* allocate extended descriptor */
17412 +       edesc = ablkcipher_edesc_alloc(req, false);
17413 +       if (IS_ERR(edesc))
17414 +               return PTR_ERR(edesc);
17415 +
17416 +       caam_req->flc = &ctx->flc[DECRYPT];
17417 +       caam_req->op_type = DECRYPT;
17418 +       caam_req->cbk = ablkcipher_done;
17419 +       caam_req->ctx = &req->base;
17420 +       caam_req->edesc = edesc;
17421 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17422 +       if (ret != -EINPROGRESS &&
17423 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17424 +               ablkcipher_unmap(ctx->dev, edesc, req);
17425 +               qi_cache_free(edesc);
17426 +       }
17427 +
17428 +       return ret;
17429 +}
17430 +
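+/* Pairs a generic crypto_alg with the CAAM-specific data needed to run it */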
17431 +struct caam_crypto_alg {
17432 +       struct list_head entry;
17433 +       struct crypto_alg crypto_alg;
17434 +       struct caam_alg_entry caam;
17435 +};
17436 +
17437 +static int caam_cra_init(struct crypto_tfm *tfm)
17438 +{
17439 +       struct crypto_alg *alg = tfm->__crt_alg;
17440 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17441 +                                                       crypto_alg);
17442 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17443 +
17444 +       /* copy descriptor header template value */
17445 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17446 +                            caam_alg->caam.class1_alg_type;
17447 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17448 +                            caam_alg->caam.class2_alg_type;
17449 +
17450 +       ctx->dev = caam_alg->caam.dev;
17451 +
17452 +       return 0;
17453 +}
17454 +
17455 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17456 +{
17457 +       struct ablkcipher_tfm *ablkcipher_tfm =
17458 +               crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17459 +
17460 +       ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17461 +       return caam_cra_init(tfm);
17462 +}
17463 +
17464 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17465 +{
17466 +       crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17467 +       return caam_cra_init(crypto_aead_tfm(tfm));
17468 +}
17469 +
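+/* Release the DMA mappings of the shared descriptors and of the key */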
17470 +static void caam_exit_common(struct caam_ctx *ctx)
17471 +{
17472 +       int i;
17473 +
17474 +       for (i = 0; i < NUM_OP; i++) {
17475 +               if (!ctx->flc[i].flc_dma)
17476 +                       continue;
17477 +               dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
17478 +                                sizeof(ctx->flc[i].flc) +
17479 +                                       desc_bytes(ctx->flc[i].sh_desc),
17480 +                                DMA_TO_DEVICE);
17481 +       }
17482 +
17483 +       if (ctx->key_dma)
17484 +               dma_unmap_single(ctx->dev, ctx->key_dma,
17485 +                                ctx->cdata.keylen + ctx->adata.keylen_pad,
17486 +                                DMA_TO_DEVICE);
17487 +}
17488 +
17489 +static void caam_cra_exit(struct crypto_tfm *tfm)
17490 +{
17491 +       caam_exit_common(crypto_tfm_ctx(tfm));
17492 +}
17493 +
17494 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17495 +{
17496 +       caam_exit_common(crypto_aead_ctx(tfm));
17497 +}
17498 +
17499 +#define template_ablkcipher    template_u.ablkcipher
17500 +struct caam_alg_template {
17501 +       char name[CRYPTO_MAX_ALG_NAME];
17502 +       char driver_name[CRYPTO_MAX_ALG_NAME];
17503 +       unsigned int blocksize;
17504 +       u32 type;
17505 +       union {
17506 +               struct ablkcipher_alg ablkcipher;
17507 +       } template_u;
17508 +       u32 class1_alg_type;
17509 +       u32 class2_alg_type;
17510 +};
17511 +
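+/* Templates for the ablkcipher algorithms registered by this driver */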
17512 +static struct caam_alg_template driver_algs[] = {
17513 +       /* ablkcipher descriptor */
17514 +       {
17515 +               .name = "cbc(aes)",
17516 +               .driver_name = "cbc-aes-caam-qi2",
17517 +               .blocksize = AES_BLOCK_SIZE,
17518 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17519 +               .template_ablkcipher = {
17520 +                       .setkey = ablkcipher_setkey,
17521 +                       .encrypt = ablkcipher_encrypt,
17522 +                       .decrypt = ablkcipher_decrypt,
17523 +                       .givencrypt = ablkcipher_givencrypt,
17524 +                       .geniv = "<built-in>",
17525 +                       .min_keysize = AES_MIN_KEY_SIZE,
17526 +                       .max_keysize = AES_MAX_KEY_SIZE,
17527 +                       .ivsize = AES_BLOCK_SIZE,
17528 +               },
17529 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17530 +       },
17531 +       {
17532 +               .name = "cbc(des3_ede)",
17533 +               .driver_name = "cbc-3des-caam-qi2",
17534 +               .blocksize = DES3_EDE_BLOCK_SIZE,
17535 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17536 +               .template_ablkcipher = {
17537 +                       .setkey = ablkcipher_setkey,
17538 +                       .encrypt = ablkcipher_encrypt,
17539 +                       .decrypt = ablkcipher_decrypt,
17540 +                       .givencrypt = ablkcipher_givencrypt,
17541 +                       .geniv = "<built-in>",
17542 +                       .min_keysize = DES3_EDE_KEY_SIZE,
17543 +                       .max_keysize = DES3_EDE_KEY_SIZE,
17544 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17545 +               },
17546 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17547 +       },
17548 +       {
17549 +               .name = "cbc(des)",
17550 +               .driver_name = "cbc-des-caam-qi2",
17551 +               .blocksize = DES_BLOCK_SIZE,
17552 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17553 +               .template_ablkcipher = {
17554 +                       .setkey = ablkcipher_setkey,
17555 +                       .encrypt = ablkcipher_encrypt,
17556 +                       .decrypt = ablkcipher_decrypt,
17557 +                       .givencrypt = ablkcipher_givencrypt,
17558 +                       .geniv = "<built-in>",
17559 +                       .min_keysize = DES_KEY_SIZE,
17560 +                       .max_keysize = DES_KEY_SIZE,
17561 +                       .ivsize = DES_BLOCK_SIZE,
17562 +               },
17563 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17564 +       },
17565 +       {
17566 +               .name = "ctr(aes)",
17567 +               .driver_name = "ctr-aes-caam-qi2",
17568 +               .blocksize = 1,
17569 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17570 +               .template_ablkcipher = {
17571 +                       .setkey = ablkcipher_setkey,
17572 +                       .encrypt = ablkcipher_encrypt,
17573 +                       .decrypt = ablkcipher_decrypt,
17574 +                       .geniv = "chainiv",
17575 +                       .min_keysize = AES_MIN_KEY_SIZE,
17576 +                       .max_keysize = AES_MAX_KEY_SIZE,
17577 +                       .ivsize = AES_BLOCK_SIZE,
17578 +               },
17579 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17580 +       },
17581 +       {
17582 +               .name = "rfc3686(ctr(aes))",
17583 +               .driver_name = "rfc3686-ctr-aes-caam-qi2",
17584 +               .blocksize = 1,
17585 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17586 +               .template_ablkcipher = {
17587 +                       .setkey = ablkcipher_setkey,
17588 +                       .encrypt = ablkcipher_encrypt,
17589 +                       .decrypt = ablkcipher_decrypt,
17590 +                       .givencrypt = ablkcipher_givencrypt,
17591 +                       .geniv = "<built-in>",
17592 +                       .min_keysize = AES_MIN_KEY_SIZE +
17593 +                                      CTR_RFC3686_NONCE_SIZE,
17594 +                       .max_keysize = AES_MAX_KEY_SIZE +
17595 +                                      CTR_RFC3686_NONCE_SIZE,
17596 +                       .ivsize = CTR_RFC3686_IV_SIZE,
17597 +               },
17598 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17599 +       },
17600 +       {
17601 +               .name = "xts(aes)",
17602 +               .driver_name = "xts-aes-caam-qi2",
17603 +               .blocksize = AES_BLOCK_SIZE,
17604 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17605 +               .template_ablkcipher = {
17606 +                       .setkey = xts_ablkcipher_setkey,
17607 +                       .encrypt = ablkcipher_encrypt,
17608 +                       .decrypt = ablkcipher_decrypt,
17609 +                       .geniv = "eseqiv",
17610 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
17611 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
17612 +                       .ivsize = AES_BLOCK_SIZE,
17613 +               },
17614 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17615 +       }
17616 +};
17617 +
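+/* AEAD algorithms exposed by this driver: GCM variants and authenc combos */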
17618 +static struct caam_aead_alg driver_aeads[] = {
17619 +       {
17620 +               .aead = {
17621 +                       .base = {
17622 +                               .cra_name = "rfc4106(gcm(aes))",
17623 +                               .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17624 +                               .cra_blocksize = 1,
17625 +                       },
17626 +                       .setkey = rfc4106_setkey,
17627 +                       .setauthsize = rfc4106_setauthsize,
17628 +                       .encrypt = ipsec_gcm_encrypt,
17629 +                       .decrypt = ipsec_gcm_decrypt,
17630 +                       .ivsize = 8,
17631 +                       .maxauthsize = AES_BLOCK_SIZE,
17632 +               },
17633 +               .caam = {
17634 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17635 +               },
17636 +       },
17637 +       {
17638 +               .aead = {
17639 +                       .base = {
17640 +                               .cra_name = "rfc4543(gcm(aes))",
17641 +                               .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17642 +                               .cra_blocksize = 1,
17643 +                       },
17644 +                       .setkey = rfc4543_setkey,
17645 +                       .setauthsize = rfc4543_setauthsize,
17646 +                       .encrypt = ipsec_gcm_encrypt,
17647 +                       .decrypt = ipsec_gcm_decrypt,
17648 +                       .ivsize = 8,
17649 +                       .maxauthsize = AES_BLOCK_SIZE,
17650 +               },
17651 +               .caam = {
17652 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17653 +               },
17654 +       },
17655 +       /* Galois Counter Mode */
17656 +       {
17657 +               .aead = {
17658 +                       .base = {
17659 +                               .cra_name = "gcm(aes)",
17660 +                               .cra_driver_name = "gcm-aes-caam-qi2",
17661 +                               .cra_blocksize = 1,
17662 +                       },
17663 +                       .setkey = gcm_setkey,
17664 +                       .setauthsize = gcm_setauthsize,
17665 +                       .encrypt = aead_encrypt,
17666 +                       .decrypt = aead_decrypt,
17667 +                       .ivsize = 12,
17668 +                       .maxauthsize = AES_BLOCK_SIZE,
17669 +               },
17670 +               .caam = {
17671 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17672 +               }
17673 +       },
17674 +       /* single-pass ipsec_esp descriptor */
17675 +       {
17676 +               .aead = {
17677 +                       .base = {
17678 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
17679 +                               .cra_driver_name = "authenc-hmac-md5-"
17680 +                                                  "cbc-aes-caam-qi2",
17681 +                               .cra_blocksize = AES_BLOCK_SIZE,
17682 +                       },
17683 +                       .setkey = aead_setkey,
17684 +                       .setauthsize = aead_setauthsize,
17685 +                       .encrypt = aead_encrypt,
17686 +                       .decrypt = aead_decrypt,
17687 +                       .ivsize = AES_BLOCK_SIZE,
17688 +                       .maxauthsize = MD5_DIGEST_SIZE,
17689 +               },
17690 +               .caam = {
17691 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17692 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17693 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17694 +               }
17695 +       },
17696 +       {
17697 +               .aead = {
17698 +                       .base = {
17699 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17700 +                                           "cbc(aes)))",
17701 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17702 +                                                  "cbc-aes-caam-qi2",
17703 +                               .cra_blocksize = AES_BLOCK_SIZE,
17704 +                       },
17705 +                       .setkey = aead_setkey,
17706 +                       .setauthsize = aead_setauthsize,
17707 +                       .encrypt = aead_encrypt,
17708 +                       .decrypt = aead_decrypt,
17709 +                       .ivsize = AES_BLOCK_SIZE,
17710 +                       .maxauthsize = MD5_DIGEST_SIZE,
17711 +               },
17712 +               .caam = {
17713 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17714 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17715 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17716 +                       .geniv = true,
17717 +               }
17718 +       },
17719 +       {
17720 +               .aead = {
17721 +                       .base = {
17722 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
17723 +                               .cra_driver_name = "authenc-hmac-sha1-"
17724 +                                                  "cbc-aes-caam-qi2",
17725 +                               .cra_blocksize = AES_BLOCK_SIZE,
17726 +                       },
17727 +                       .setkey = aead_setkey,
17728 +                       .setauthsize = aead_setauthsize,
17729 +                       .encrypt = aead_encrypt,
17730 +                       .decrypt = aead_decrypt,
17731 +                       .ivsize = AES_BLOCK_SIZE,
17732 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17733 +               },
17734 +               .caam = {
17735 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17736 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17737 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17738 +               }
17739 +       },
17740 +       {
17741 +               .aead = {
17742 +                       .base = {
17743 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
17744 +                                           "cbc(aes)))",
17745 +                               .cra_driver_name = "echainiv-authenc-"
17746 +                                                  "hmac-sha1-cbc-aes-caam-qi2",
17747 +                               .cra_blocksize = AES_BLOCK_SIZE,
17748 +                       },
17749 +                       .setkey = aead_setkey,
17750 +                       .setauthsize = aead_setauthsize,
17751 +                       .encrypt = aead_encrypt,
17752 +                       .decrypt = aead_decrypt,
17753 +                       .ivsize = AES_BLOCK_SIZE,
17754 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17755 +               },
17756 +               .caam = {
17757 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17758 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17759 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17760 +                       .geniv = true,
17761 +               },
17762 +       },
17763 +       {
17764 +               .aead = {
17765 +                       .base = {
17766 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
17767 +                               .cra_driver_name = "authenc-hmac-sha224-"
17768 +                                                  "cbc-aes-caam-qi2",
17769 +                               .cra_blocksize = AES_BLOCK_SIZE,
17770 +                       },
17771 +                       .setkey = aead_setkey,
17772 +                       .setauthsize = aead_setauthsize,
17773 +                       .encrypt = aead_encrypt,
17774 +                       .decrypt = aead_decrypt,
17775 +                       .ivsize = AES_BLOCK_SIZE,
17776 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17777 +               },
17778 +               .caam = {
17779 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17780 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17781 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17782 +               }
17783 +       },
17784 +       {
17785 +               .aead = {
17786 +                       .base = {
17787 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
17788 +                                           "cbc(aes)))",
17789 +                               .cra_driver_name = "echainiv-authenc-"
17790 +                                                  "hmac-sha224-cbc-aes-caam-qi2",
17791 +                               .cra_blocksize = AES_BLOCK_SIZE,
17792 +                       },
17793 +                       .setkey = aead_setkey,
17794 +                       .setauthsize = aead_setauthsize,
17795 +                       .encrypt = aead_encrypt,
17796 +                       .decrypt = aead_decrypt,
17797 +                       .ivsize = AES_BLOCK_SIZE,
17798 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17799 +               },
17800 +               .caam = {
17801 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17802 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17803 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17804 +                       .geniv = true,
17805 +               }
17806 +       },
17807 +       {
17808 +               .aead = {
17809 +                       .base = {
17810 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
17811 +                               .cra_driver_name = "authenc-hmac-sha256-"
17812 +                                                  "cbc-aes-caam-qi2",
17813 +                               .cra_blocksize = AES_BLOCK_SIZE,
17814 +                       },
17815 +                       .setkey = aead_setkey,
17816 +                       .setauthsize = aead_setauthsize,
17817 +                       .encrypt = aead_encrypt,
17818 +                       .decrypt = aead_decrypt,
17819 +                       .ivsize = AES_BLOCK_SIZE,
17820 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17821 +               },
17822 +               .caam = {
17823 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17824 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17825 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17826 +               }
17827 +       },
17828 +       {
17829 +               .aead = {
17830 +                       .base = {
17831 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
17832 +                                           "cbc(aes)))",
17833 +                               .cra_driver_name = "echainiv-authenc-"
17834 +                                                  "hmac-sha256-cbc-aes-"
17835 +                                                  "caam-qi2",
17836 +                               .cra_blocksize = AES_BLOCK_SIZE,
17837 +                       },
17838 +                       .setkey = aead_setkey,
17839 +                       .setauthsize = aead_setauthsize,
17840 +                       .encrypt = aead_encrypt,
17841 +                       .decrypt = aead_decrypt,
17842 +                       .ivsize = AES_BLOCK_SIZE,
17843 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17844 +               },
17845 +               .caam = {
17846 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17847 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17848 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17849 +                       .geniv = true,
17850 +               }
17851 +       },
17852 +       {
17853 +               .aead = {
17854 +                       .base = {
17855 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
17856 +                               .cra_driver_name = "authenc-hmac-sha384-"
17857 +                                                  "cbc-aes-caam-qi2",
17858 +                               .cra_blocksize = AES_BLOCK_SIZE,
17859 +                       },
17860 +                       .setkey = aead_setkey,
17861 +                       .setauthsize = aead_setauthsize,
17862 +                       .encrypt = aead_encrypt,
17863 +                       .decrypt = aead_decrypt,
17864 +                       .ivsize = AES_BLOCK_SIZE,
17865 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17866 +               },
17867 +               .caam = {
17868 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17869 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17870 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17871 +               }
17872 +       },
17873 +       {
17874 +               .aead = {
17875 +                       .base = {
17876 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
17877 +                                           "cbc(aes)))",
17878 +                               .cra_driver_name = "echainiv-authenc-"
17879 +                                                  "hmac-sha384-cbc-aes-"
17880 +                                                  "caam-qi2",
17881 +                               .cra_blocksize = AES_BLOCK_SIZE,
17882 +                       },
17883 +                       .setkey = aead_setkey,
17884 +                       .setauthsize = aead_setauthsize,
17885 +                       .encrypt = aead_encrypt,
17886 +                       .decrypt = aead_decrypt,
17887 +                       .ivsize = AES_BLOCK_SIZE,
17888 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17889 +               },
17890 +               .caam = {
17891 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17892 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17893 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17894 +                       .geniv = true,
17895 +               }
17896 +       },
17897 +       {
17898 +               .aead = {
17899 +                       .base = {
17900 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
17901 +                               .cra_driver_name = "authenc-hmac-sha512-"
17902 +                                                  "cbc-aes-caam-qi2",
17903 +                               .cra_blocksize = AES_BLOCK_SIZE,
17904 +                       },
17905 +                       .setkey = aead_setkey,
17906 +                       .setauthsize = aead_setauthsize,
17907 +                       .encrypt = aead_encrypt,
17908 +                       .decrypt = aead_decrypt,
17909 +                       .ivsize = AES_BLOCK_SIZE,
17910 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17911 +               },
17912 +               .caam = {
17913 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17914 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17915 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17916 +               }
17917 +       },
17918 +       {
17919 +               .aead = {
17920 +                       .base = {
17921 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
17922 +                                           "cbc(aes)))",
17923 +                               .cra_driver_name = "echainiv-authenc-"
17924 +                                                  "hmac-sha512-cbc-aes-"
17925 +                                                  "caam-qi2",
17926 +                               .cra_blocksize = AES_BLOCK_SIZE,
17927 +                       },
17928 +                       .setkey = aead_setkey,
17929 +                       .setauthsize = aead_setauthsize,
17930 +                       .encrypt = aead_encrypt,
17931 +                       .decrypt = aead_decrypt,
17932 +                       .ivsize = AES_BLOCK_SIZE,
17933 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17934 +               },
17935 +               .caam = {
17936 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17937 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17938 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17939 +                       .geniv = true,
17940 +               }
17941 +       },
17942 +       {
17943 +               .aead = {
17944 +                       .base = {
17945 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17946 +                               .cra_driver_name = "authenc-hmac-md5-"
17947 +                                                  "cbc-des3_ede-caam-qi2",
17948 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17949 +                       },
17950 +                       .setkey = aead_setkey,
17951 +                       .setauthsize = aead_setauthsize,
17952 +                       .encrypt = aead_encrypt,
17953 +                       .decrypt = aead_decrypt,
17954 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17955 +                       .maxauthsize = MD5_DIGEST_SIZE,
17956 +               },
17957 +               .caam = {
17958 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17959 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17960 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17961 +               }
17962 +       },
17963 +       {
17964 +               .aead = {
17965 +                       .base = {
17966 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17967 +                                           "cbc(des3_ede)))",
17968 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17969 +                                                  "cbc-des3_ede-caam-qi2",
17970 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17971 +                       },
17972 +                       .setkey = aead_setkey,
17973 +                       .setauthsize = aead_setauthsize,
17974 +                       .encrypt = aead_encrypt,
17975 +                       .decrypt = aead_decrypt,
17976 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17977 +                       .maxauthsize = MD5_DIGEST_SIZE,
17978 +               },
17979 +               .caam = {
17980 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17981 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17982 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17983 +                       .geniv = true,
17984 +               }
17985 +       },
17986 +       {
17987 +               .aead = {
17988 +                       .base = {
17989 +                               .cra_name = "authenc(hmac(sha1),"
17990 +                                           "cbc(des3_ede))",
17991 +                               .cra_driver_name = "authenc-hmac-sha1-"
17992 +                                                  "cbc-des3_ede-caam-qi2",
17993 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17994 +                       },
17995 +                       .setkey = aead_setkey,
17996 +                       .setauthsize = aead_setauthsize,
17997 +                       .encrypt = aead_encrypt,
17998 +                       .decrypt = aead_decrypt,
17999 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18000 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18001 +               },
18002 +               .caam = {
18003 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18004 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18005 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18006 +               },
18007 +       },
18008 +       {
18009 +               .aead = {
18010 +                       .base = {
18011 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
18012 +                                           "cbc(des3_ede)))",
18013 +                               .cra_driver_name = "echainiv-authenc-"
18014 +                                                  "hmac-sha1-"
18015 +                                                  "cbc-des3_ede-caam-qi2",
18016 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18017 +                       },
18018 +                       .setkey = aead_setkey,
18019 +                       .setauthsize = aead_setauthsize,
18020 +                       .encrypt = aead_encrypt,
18021 +                       .decrypt = aead_decrypt,
18022 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18023 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18024 +               },
18025 +               .caam = {
18026 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18027 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18028 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18029 +                       .geniv = true,
18030 +               }
18031 +       },
18032 +       {
18033 +               .aead = {
18034 +                       .base = {
18035 +                               .cra_name = "authenc(hmac(sha224),"
18036 +                                           "cbc(des3_ede))",
18037 +                               .cra_driver_name = "authenc-hmac-sha224-"
18038 +                                                  "cbc-des3_ede-caam-qi2",
18039 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18040 +                       },
18041 +                       .setkey = aead_setkey,
18042 +                       .setauthsize = aead_setauthsize,
18043 +                       .encrypt = aead_encrypt,
18044 +                       .decrypt = aead_decrypt,
18045 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18046 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18047 +               },
18048 +               .caam = {
18049 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18050 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18051 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18052 +               },
18053 +       },
18054 +       {
18055 +               .aead = {
18056 +                       .base = {
18057 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
18058 +                                           "cbc(des3_ede)))",
18059 +                               .cra_driver_name = "echainiv-authenc-"
18060 +                                                  "hmac-sha224-"
18061 +                                                  "cbc-des3_ede-caam-qi2",
18062 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18063 +                       },
18064 +                       .setkey = aead_setkey,
18065 +                       .setauthsize = aead_setauthsize,
18066 +                       .encrypt = aead_encrypt,
18067 +                       .decrypt = aead_decrypt,
18068 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18069 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18070 +               },
18071 +               .caam = {
18072 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18073 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18074 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18075 +                       .geniv = true,
18076 +               }
18077 +       },
18078 +       {
18079 +               .aead = {
18080 +                       .base = {
18081 +                               .cra_name = "authenc(hmac(sha256),"
18082 +                                           "cbc(des3_ede))",
18083 +                               .cra_driver_name = "authenc-hmac-sha256-"
18084 +                                                  "cbc-des3_ede-caam-qi2",
18085 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18086 +                       },
18087 +                       .setkey = aead_setkey,
18088 +                       .setauthsize = aead_setauthsize,
18089 +                       .encrypt = aead_encrypt,
18090 +                       .decrypt = aead_decrypt,
18091 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18092 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18093 +               },
18094 +               .caam = {
18095 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18096 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18097 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18098 +               },
18099 +       },
18100 +       {
18101 +               .aead = {
18102 +                       .base = {
18103 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
18104 +                                           "cbc(des3_ede)))",
18105 +                               .cra_driver_name = "echainiv-authenc-"
18106 +                                                  "hmac-sha256-"
18107 +                                                  "cbc-des3_ede-caam-qi2",
18108 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18109 +                       },
18110 +                       .setkey = aead_setkey,
18111 +                       .setauthsize = aead_setauthsize,
18112 +                       .encrypt = aead_encrypt,
18113 +                       .decrypt = aead_decrypt,
18114 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18115 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18116 +               },
18117 +               .caam = {
18118 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18119 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18120 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18121 +                       .geniv = true,
18122 +               }
18123 +       },
18124 +       {
18125 +               .aead = {
18126 +                       .base = {
18127 +                               .cra_name = "authenc(hmac(sha384),"
18128 +                                           "cbc(des3_ede))",
18129 +                               .cra_driver_name = "authenc-hmac-sha384-"
18130 +                                                  "cbc-des3_ede-caam-qi2",
18131 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18132 +                       },
18133 +                       .setkey = aead_setkey,
18134 +                       .setauthsize = aead_setauthsize,
18135 +                       .encrypt = aead_encrypt,
18136 +                       .decrypt = aead_decrypt,
18137 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18138 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18139 +               },
18140 +               .caam = {
18141 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18142 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18143 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18144 +               },
18145 +       },
18146 +       {
18147 +               .aead = {
18148 +                       .base = {
18149 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18150 +                                           "cbc(des3_ede)))",
18151 +                               .cra_driver_name = "echainiv-authenc-"
18152 +                                                  "hmac-sha384-"
18153 +                                                  "cbc-des3_ede-caam-qi2",
18154 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18155 +                       },
18156 +                       .setkey = aead_setkey,
18157 +                       .setauthsize = aead_setauthsize,
18158 +                       .encrypt = aead_encrypt,
18159 +                       .decrypt = aead_decrypt,
18160 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18161 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18162 +               },
18163 +               .caam = {
18164 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18165 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18166 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18167 +                       .geniv = true,
18168 +               }
18169 +       },
18170 +       {
18171 +               .aead = {
18172 +                       .base = {
18173 +                               .cra_name = "authenc(hmac(sha512),"
18174 +                                           "cbc(des3_ede))",
18175 +                               .cra_driver_name = "authenc-hmac-sha512-"
18176 +                                                  "cbc-des3_ede-caam-qi2",
18177 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18178 +                       },
18179 +                       .setkey = aead_setkey,
18180 +                       .setauthsize = aead_setauthsize,
18181 +                       .encrypt = aead_encrypt,
18182 +                       .decrypt = aead_decrypt,
18183 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18184 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18185 +               },
18186 +               .caam = {
18187 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18188 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18189 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18190 +               },
18191 +       },
18192 +       {
18193 +               .aead = {
18194 +                       .base = {
18195 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18196 +                                           "cbc(des3_ede)))",
18197 +                               .cra_driver_name = "echainiv-authenc-"
18198 +                                                  "hmac-sha512-"
18199 +                                                  "cbc-des3_ede-caam-qi2",
18200 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18201 +                       },
18202 +                       .setkey = aead_setkey,
18203 +                       .setauthsize = aead_setauthsize,
18204 +                       .encrypt = aead_encrypt,
18205 +                       .decrypt = aead_decrypt,
18206 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18207 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18208 +               },
18209 +               .caam = {
18210 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18211 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18212 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18213 +                       .geniv = true,
18214 +               }
18215 +       },
18216 +       {
18217 +               .aead = {
18218 +                       .base = {
18219 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
18220 +                               .cra_driver_name = "authenc-hmac-md5-"
18221 +                                                  "cbc-des-caam-qi2",
18222 +                               .cra_blocksize = DES_BLOCK_SIZE,
18223 +                       },
18224 +                       .setkey = aead_setkey,
18225 +                       .setauthsize = aead_setauthsize,
18226 +                       .encrypt = aead_encrypt,
18227 +                       .decrypt = aead_decrypt,
18228 +                       .ivsize = DES_BLOCK_SIZE,
18229 +                       .maxauthsize = MD5_DIGEST_SIZE,
18230 +               },
18231 +               .caam = {
18232 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18233 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18234 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18235 +               },
18236 +       },
18237 +       {
18238 +               .aead = {
18239 +                       .base = {
18240 +                               .cra_name = "echainiv(authenc(hmac(md5),"
18241 +                                           "cbc(des)))",
18242 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
18243 +                                                  "cbc-des-caam-qi2",
18244 +                               .cra_blocksize = DES_BLOCK_SIZE,
18245 +                       },
18246 +                       .setkey = aead_setkey,
18247 +                       .setauthsize = aead_setauthsize,
18248 +                       .encrypt = aead_encrypt,
18249 +                       .decrypt = aead_decrypt,
18250 +                       .ivsize = DES_BLOCK_SIZE,
18251 +                       .maxauthsize = MD5_DIGEST_SIZE,
18252 +               },
18253 +               .caam = {
18254 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18255 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18256 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18257 +                       .geniv = true,
18258 +               }
18259 +       },
18260 +       {
18261 +               .aead = {
18262 +                       .base = {
18263 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
18264 +                               .cra_driver_name = "authenc-hmac-sha1-"
18265 +                                                  "cbc-des-caam-qi2",
18266 +                               .cra_blocksize = DES_BLOCK_SIZE,
18267 +                       },
18268 +                       .setkey = aead_setkey,
18269 +                       .setauthsize = aead_setauthsize,
18270 +                       .encrypt = aead_encrypt,
18271 +                       .decrypt = aead_decrypt,
18272 +                       .ivsize = DES_BLOCK_SIZE,
18273 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18274 +               },
18275 +               .caam = {
18276 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18277 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18278 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18279 +               },
18280 +       },
18281 +       {
18282 +               .aead = {
18283 +                       .base = {
18284 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
18285 +                                           "cbc(des)))",
18286 +                               .cra_driver_name = "echainiv-authenc-"
18287 +                                                  "hmac-sha1-cbc-des-caam-qi2",
18288 +                               .cra_blocksize = DES_BLOCK_SIZE,
18289 +                       },
18290 +                       .setkey = aead_setkey,
18291 +                       .setauthsize = aead_setauthsize,
18292 +                       .encrypt = aead_encrypt,
18293 +                       .decrypt = aead_decrypt,
18294 +                       .ivsize = DES_BLOCK_SIZE,
18295 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18296 +               },
18297 +               .caam = {
18298 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18299 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18300 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18301 +                       .geniv = true,
18302 +               }
18303 +       },
18304 +       {
18305 +               .aead = {
18306 +                       .base = {
18307 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
18308 +                               .cra_driver_name = "authenc-hmac-sha224-"
18309 +                                                  "cbc-des-caam-qi2",
18310 +                               .cra_blocksize = DES_BLOCK_SIZE,
18311 +                       },
18312 +                       .setkey = aead_setkey,
18313 +                       .setauthsize = aead_setauthsize,
18314 +                       .encrypt = aead_encrypt,
18315 +                       .decrypt = aead_decrypt,
18316 +                       .ivsize = DES_BLOCK_SIZE,
18317 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18318 +               },
18319 +               .caam = {
18320 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18321 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18322 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18323 +               },
18324 +       },
18325 +       {
18326 +               .aead = {
18327 +                       .base = {
18328 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
18329 +                                           "cbc(des)))",
18330 +                               .cra_driver_name = "echainiv-authenc-"
18331 +                                                  "hmac-sha224-cbc-des-"
18332 +                                                  "caam-qi2",
18333 +                               .cra_blocksize = DES_BLOCK_SIZE,
18334 +                       },
18335 +                       .setkey = aead_setkey,
18336 +                       .setauthsize = aead_setauthsize,
18337 +                       .encrypt = aead_encrypt,
18338 +                       .decrypt = aead_decrypt,
18339 +                       .ivsize = DES_BLOCK_SIZE,
18340 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18341 +               },
18342 +               .caam = {
18343 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18344 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18345 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18346 +                       .geniv = true,
18347 +               }
18348 +       },
18349 +       {
18350 +               .aead = {
18351 +                       .base = {
18352 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
18353 +                               .cra_driver_name = "authenc-hmac-sha256-"
18354 +                                                  "cbc-des-caam-qi2",
18355 +                               .cra_blocksize = DES_BLOCK_SIZE,
18356 +                       },
18357 +                       .setkey = aead_setkey,
18358 +                       .setauthsize = aead_setauthsize,
18359 +                       .encrypt = aead_encrypt,
18360 +                       .decrypt = aead_decrypt,
18361 +                       .ivsize = DES_BLOCK_SIZE,
18362 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18363 +               },
18364 +               .caam = {
18365 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18366 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18367 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18368 +               },
18369 +       },
18370 +       {
18371 +               .aead = {
18372 +                       .base = {
18373 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
18374 +                                           "cbc(des)))",
18375 +                               .cra_driver_name = "echainiv-authenc-"
18376 +                                                  "hmac-sha256-cbc-des-"
18377 +                                                  "caam-qi2",
18378 +                               .cra_blocksize = DES_BLOCK_SIZE,
18379 +                       },
18380 +                       .setkey = aead_setkey,
18381 +                       .setauthsize = aead_setauthsize,
18382 +                       .encrypt = aead_encrypt,
18383 +                       .decrypt = aead_decrypt,
18384 +                       .ivsize = DES_BLOCK_SIZE,
18385 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18386 +               },
18387 +               .caam = {
18388 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18389 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18390 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18391 +                       .geniv = true,
18392 +               },
18393 +       },
18394 +       {
18395 +               .aead = {
18396 +                       .base = {
18397 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
18398 +                               .cra_driver_name = "authenc-hmac-sha384-"
18399 +                                                  "cbc-des-caam-qi2",
18400 +                               .cra_blocksize = DES_BLOCK_SIZE,
18401 +                       },
18402 +                       .setkey = aead_setkey,
18403 +                       .setauthsize = aead_setauthsize,
18404 +                       .encrypt = aead_encrypt,
18405 +                       .decrypt = aead_decrypt,
18406 +                       .ivsize = DES_BLOCK_SIZE,
18407 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18408 +               },
18409 +               .caam = {
18410 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18411 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18412 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18413 +               },
18414 +       },
18415 +       {
18416 +               .aead = {
18417 +                       .base = {
18418 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18419 +                                           "cbc(des)))",
18420 +                               .cra_driver_name = "echainiv-authenc-"
18421 +                                                  "hmac-sha384-cbc-des-"
18422 +                                                  "caam-qi2",
18423 +                               .cra_blocksize = DES_BLOCK_SIZE,
18424 +                       },
18425 +                       .setkey = aead_setkey,
18426 +                       .setauthsize = aead_setauthsize,
18427 +                       .encrypt = aead_encrypt,
18428 +                       .decrypt = aead_decrypt,
18429 +                       .ivsize = DES_BLOCK_SIZE,
18430 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18431 +               },
18432 +               .caam = {
18433 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18434 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18435 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18436 +                       .geniv = true,
18437 +               }
18438 +       },
18439 +       {
18440 +               .aead = {
18441 +                       .base = {
18442 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
18443 +                               .cra_driver_name = "authenc-hmac-sha512-"
18444 +                                                  "cbc-des-caam-qi2",
18445 +                               .cra_blocksize = DES_BLOCK_SIZE,
18446 +                       },
18447 +                       .setkey = aead_setkey,
18448 +                       .setauthsize = aead_setauthsize,
18449 +                       .encrypt = aead_encrypt,
18450 +                       .decrypt = aead_decrypt,
18451 +                       .ivsize = DES_BLOCK_SIZE,
18452 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18453 +               },
18454 +               .caam = {
18455 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18456 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18457 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18458 +               }
18459 +       },
18460 +       {
18461 +               .aead = {
18462 +                       .base = {
18463 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18464 +                                           "cbc(des)))",
18465 +                               .cra_driver_name = "echainiv-authenc-"
18466 +                                                  "hmac-sha512-cbc-des-"
18467 +                                                  "caam-qi2",
18468 +                               .cra_blocksize = DES_BLOCK_SIZE,
18469 +                       },
18470 +                       .setkey = aead_setkey,
18471 +                       .setauthsize = aead_setauthsize,
18472 +                       .encrypt = aead_encrypt,
18473 +                       .decrypt = aead_decrypt,
18474 +                       .ivsize = DES_BLOCK_SIZE,
18475 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18476 +               },
18477 +               .caam = {
18478 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18479 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18480 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18481 +                       .geniv = true,
18482 +               }
18483 +       },
18484 +       {
18485 +               .aead = {
18486 +                       .base = {
18487 +                               .cra_name = "authenc(hmac(md5),"
18488 +                                           "rfc3686(ctr(aes)))",
18489 +                               .cra_driver_name = "authenc-hmac-md5-"
18490 +                                                  "rfc3686-ctr-aes-caam-qi2",
18491 +                               .cra_blocksize = 1,
18492 +                       },
18493 +                       .setkey = aead_setkey,
18494 +                       .setauthsize = aead_setauthsize,
18495 +                       .encrypt = aead_encrypt,
18496 +                       .decrypt = aead_decrypt,
18497 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18498 +                       .maxauthsize = MD5_DIGEST_SIZE,
18499 +               },
18500 +               .caam = {
18501 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18502 +                                          OP_ALG_AAI_CTR_MOD128,
18503 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18504 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18505 +                       .rfc3686 = true,
18506 +               },
18507 +       },
18508 +       {
18509 +               .aead = {
18510 +                       .base = {
18511 +                               .cra_name = "seqiv(authenc("
18512 +                                           "hmac(md5),rfc3686(ctr(aes))))",
18513 +                               .cra_driver_name = "seqiv-authenc-hmac-md5-"
18514 +                                                  "rfc3686-ctr-aes-caam-qi2",
18515 +                               .cra_blocksize = 1,
18516 +                       },
18517 +                       .setkey = aead_setkey,
18518 +                       .setauthsize = aead_setauthsize,
18519 +                       .encrypt = aead_encrypt,
18520 +                       .decrypt = aead_decrypt,
18521 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18522 +                       .maxauthsize = MD5_DIGEST_SIZE,
18523 +               },
18524 +               .caam = {
18525 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18526 +                                          OP_ALG_AAI_CTR_MOD128,
18527 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18528 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18529 +                       .rfc3686 = true,
18530 +                       .geniv = true,
18531 +               },
18532 +       },
18533 +       {
18534 +               .aead = {
18535 +                       .base = {
18536 +                               .cra_name = "authenc(hmac(sha1),"
18537 +                                           "rfc3686(ctr(aes)))",
18538 +                               .cra_driver_name = "authenc-hmac-sha1-"
18539 +                                                  "rfc3686-ctr-aes-caam-qi2",
18540 +                               .cra_blocksize = 1,
18541 +                       },
18542 +                       .setkey = aead_setkey,
18543 +                       .setauthsize = aead_setauthsize,
18544 +                       .encrypt = aead_encrypt,
18545 +                       .decrypt = aead_decrypt,
18546 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18547 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18548 +               },
18549 +               .caam = {
18550 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18551 +                                          OP_ALG_AAI_CTR_MOD128,
18552 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18553 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18554 +                       .rfc3686 = true,
18555 +               },
18556 +       },
18557 +       {
18558 +               .aead = {
18559 +                       .base = {
18560 +                               .cra_name = "seqiv(authenc("
18561 +                                           "hmac(sha1),rfc3686(ctr(aes))))",
18562 +                               .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18563 +                                                  "rfc3686-ctr-aes-caam-qi2",
18564 +                               .cra_blocksize = 1,
18565 +                       },
18566 +                       .setkey = aead_setkey,
18567 +                       .setauthsize = aead_setauthsize,
18568 +                       .encrypt = aead_encrypt,
18569 +                       .decrypt = aead_decrypt,
18570 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18571 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18572 +               },
18573 +               .caam = {
18574 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18575 +                                          OP_ALG_AAI_CTR_MOD128,
18576 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18577 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18578 +                       .rfc3686 = true,
18579 +                       .geniv = true,
18580 +               },
18581 +       },
18582 +       {
18583 +               .aead = {
18584 +                       .base = {
18585 +                               .cra_name = "authenc(hmac(sha224),"
18586 +                                           "rfc3686(ctr(aes)))",
18587 +                               .cra_driver_name = "authenc-hmac-sha224-"
18588 +                                                  "rfc3686-ctr-aes-caam-qi2",
18589 +                               .cra_blocksize = 1,
18590 +                       },
18591 +                       .setkey = aead_setkey,
18592 +                       .setauthsize = aead_setauthsize,
18593 +                       .encrypt = aead_encrypt,
18594 +                       .decrypt = aead_decrypt,
18595 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18596 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18597 +               },
18598 +               .caam = {
18599 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18600 +                                          OP_ALG_AAI_CTR_MOD128,
18601 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18602 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18603 +                       .rfc3686 = true,
18604 +               },
18605 +       },
18606 +       {
18607 +               .aead = {
18608 +                       .base = {
18609 +                               .cra_name = "seqiv(authenc("
18610 +                                           "hmac(sha224),rfc3686(ctr(aes))))",
18611 +                               .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18612 +                                                  "rfc3686-ctr-aes-caam-qi2",
18613 +                               .cra_blocksize = 1,
18614 +                       },
18615 +                       .setkey = aead_setkey,
18616 +                       .setauthsize = aead_setauthsize,
18617 +                       .encrypt = aead_encrypt,
18618 +                       .decrypt = aead_decrypt,
18619 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18620 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18621 +               },
18622 +               .caam = {
18623 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18624 +                                          OP_ALG_AAI_CTR_MOD128,
18625 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18626 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18627 +                       .rfc3686 = true,
18628 +                       .geniv = true,
18629 +               },
18630 +       },
18631 +       {
18632 +               .aead = {
18633 +                       .base = {
18634 +                               .cra_name = "authenc(hmac(sha256),"
18635 +                                           "rfc3686(ctr(aes)))",
18636 +                               .cra_driver_name = "authenc-hmac-sha256-"
18637 +                                                  "rfc3686-ctr-aes-caam-qi2",
18638 +                               .cra_blocksize = 1,
18639 +                       },
18640 +                       .setkey = aead_setkey,
18641 +                       .setauthsize = aead_setauthsize,
18642 +                       .encrypt = aead_encrypt,
18643 +                       .decrypt = aead_decrypt,
18644 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18645 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18646 +               },
18647 +               .caam = {
18648 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18649 +                                          OP_ALG_AAI_CTR_MOD128,
18650 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18651 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18652 +                       .rfc3686 = true,
18653 +               },
18654 +       },
18655 +       {
18656 +               .aead = {
18657 +                       .base = {
18658 +                               .cra_name = "seqiv(authenc(hmac(sha256),"
18659 +                                           "rfc3686(ctr(aes))))",
18660 +                               .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18661 +                                                  "rfc3686-ctr-aes-caam-qi2",
18662 +                               .cra_blocksize = 1,
18663 +                       },
18664 +                       .setkey = aead_setkey,
18665 +                       .setauthsize = aead_setauthsize,
18666 +                       .encrypt = aead_encrypt,
18667 +                       .decrypt = aead_decrypt,
18668 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18669 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18670 +               },
18671 +               .caam = {
18672 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18673 +                                          OP_ALG_AAI_CTR_MOD128,
18674 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18675 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18676 +                       .rfc3686 = true,
18677 +                       .geniv = true,
18678 +               },
18679 +       },
18680 +       {
18681 +               .aead = {
18682 +                       .base = {
18683 +                               .cra_name = "authenc(hmac(sha384),"
18684 +                                           "rfc3686(ctr(aes)))",
18685 +                               .cra_driver_name = "authenc-hmac-sha384-"
18686 +                                                  "rfc3686-ctr-aes-caam-qi2",
18687 +                               .cra_blocksize = 1,
18688 +                       },
18689 +                       .setkey = aead_setkey,
18690 +                       .setauthsize = aead_setauthsize,
18691 +                       .encrypt = aead_encrypt,
18692 +                       .decrypt = aead_decrypt,
18693 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18694 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18695 +               },
18696 +               .caam = {
18697 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18698 +                                          OP_ALG_AAI_CTR_MOD128,
18699 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18700 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18701 +                       .rfc3686 = true,
18702 +               },
18703 +       },
18704 +       {
18705 +               .aead = {
18706 +                       .base = {
18707 +                               .cra_name = "seqiv(authenc(hmac(sha384),"
18708 +                                           "rfc3686(ctr(aes))))",
18709 +                               .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18710 +                                                  "rfc3686-ctr-aes-caam-qi2",
18711 +                               .cra_blocksize = 1,
18712 +                       },
18713 +                       .setkey = aead_setkey,
18714 +                       .setauthsize = aead_setauthsize,
18715 +                       .encrypt = aead_encrypt,
18716 +                       .decrypt = aead_decrypt,
18717 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18718 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18719 +               },
18720 +               .caam = {
18721 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18722 +                                          OP_ALG_AAI_CTR_MOD128,
18723 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18724 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18725 +                       .rfc3686 = true,
18726 +                       .geniv = true,
18727 +               },
18728 +       },
18729 +       {
18730 +               .aead = {
18731 +                       .base = {
18732 +                               .cra_name = "authenc(hmac(sha512),"
18733 +                                           "rfc3686(ctr(aes)))",
18734 +                               .cra_driver_name = "authenc-hmac-sha512-"
18735 +                                                  "rfc3686-ctr-aes-caam-qi2",
18736 +                               .cra_blocksize = 1,
18737 +                       },
18738 +                       .setkey = aead_setkey,
18739 +                       .setauthsize = aead_setauthsize,
18740 +                       .encrypt = aead_encrypt,
18741 +                       .decrypt = aead_decrypt,
18742 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18743 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18744 +               },
18745 +               .caam = {
18746 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18747 +                                          OP_ALG_AAI_CTR_MOD128,
18748 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18749 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18750 +                       .rfc3686 = true,
18751 +               },
18752 +       },
18753 +       {
18754 +               .aead = {
18755 +                       .base = {
18756 +                               .cra_name = "seqiv(authenc(hmac(sha512),"
18757 +                                           "rfc3686(ctr(aes))))",
18758 +                               .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18759 +                                                  "rfc3686-ctr-aes-caam-qi2",
18760 +                               .cra_blocksize = 1,
18761 +                       },
18762 +                       .setkey = aead_setkey,
18763 +                       .setauthsize = aead_setauthsize,
18764 +                       .encrypt = aead_encrypt,
18765 +                       .decrypt = aead_decrypt,
18766 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18767 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18768 +               },
18769 +               .caam = {
18770 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18771 +                                          OP_ALG_AAI_CTR_MOD128,
18772 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18773 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18774 +                       .rfc3686 = true,
18775 +                       .geniv = true,
18776 +               },
18777 +       },
18778 +       {
18779 +               .aead = {
18780 +                       .base = {
18781 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
18782 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18783 +                               .cra_blocksize = AES_BLOCK_SIZE,
18784 +                       },
18785 +                       .setkey = tls_setkey,
18786 +                       .setauthsize = tls_setauthsize,
18787 +                       .encrypt = tls_encrypt,
18788 +                       .decrypt = tls_decrypt,
18789 +                       .ivsize = AES_BLOCK_SIZE,
18790 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18791 +               },
18792 +               .caam = {
18793 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18794 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18795 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18796 +               },
18797 +       },
18798 +};
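+
+/*
+ * Usage sketch (illustrative only; key layout assumed): kernel clients reach
+ * the transforms above through the generic AEAD API rather than through this
+ * driver directly, e.g.:
+ *
+ *     struct crypto_aead *tfm;
+ *
+ *     tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des))", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
+ *     err = crypto_aead_setkey(tfm, key, keylen);
+ *
+ * setkey()/setauthsize() land in aead_setkey()/aead_setauthsize() above, with
+ * the key blob packed in the usual authenc() rtattr format. The "-caam-qi2"
+ * implementations win over software fallbacks via CAAM_CRA_PRIORITY.
+ */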
18799 +
18800 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18801 +                                             *template)
18802 +{
18803 +       struct caam_crypto_alg *t_alg;
18804 +       struct crypto_alg *alg;
18805 +
18806 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18807 +       if (!t_alg)
18808 +               return ERR_PTR(-ENOMEM);
18809 +
18810 +       alg = &t_alg->crypto_alg;
18811 +
18812 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18813 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18814 +                template->driver_name);
18815 +       alg->cra_module = THIS_MODULE;
18816 +       alg->cra_exit = caam_cra_exit;
18817 +       alg->cra_priority = CAAM_CRA_PRIORITY;
18818 +       alg->cra_blocksize = template->blocksize;
18819 +       alg->cra_alignmask = 0;
18820 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
18821 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18822 +                        template->type;
18823 +       switch (template->type) {
18824 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
18825 +               alg->cra_init = caam_cra_init_ablkcipher;
18826 +               alg->cra_type = &crypto_givcipher_type;
18827 +               alg->cra_ablkcipher = template->template_ablkcipher;
18828 +               break;
18829 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
18830 +               alg->cra_init = caam_cra_init_ablkcipher;
18831 +               alg->cra_type = &crypto_ablkcipher_type;
18832 +               alg->cra_ablkcipher = template->template_ablkcipher;
18833 +               break;
18834 +       }
18835 +
18836 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
18837 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
18838 +
18839 +       return t_alg;
18840 +}
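+
+/*
+ * Note: only the two ablkcipher template flavours are converted here. AEAD
+ * algorithms never pass through caam_alg_alloc() - they are declared directly
+ * as struct caam_aead_alg entries in driver_aeads[] and registered with
+ * crypto_register_aead() from the probe path.
+ */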
18841 +
18842 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18843 +{
18844 +       struct aead_alg *alg = &t_alg->aead;
18845 +
18846 +       alg->base.cra_module = THIS_MODULE;
18847 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
18848 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18849 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18850 +
18851 +       alg->init = caam_cra_init_aead;
18852 +       alg->exit = caam_cra_exit_aead;
18853 +}
18854 +
18855 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
18856 +{
18857 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18858 +
18859 +       ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
18860 +       napi_schedule_irqoff(&ppriv->napi);
18861 +}
18862 +
18863 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
18864 +{
18865 +       struct device *dev = priv->dev;
18866 +       struct dpaa2_io_notification_ctx *nctx;
18867 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18868 +       int err, i = 0, cpu;
18869 +
18870 +       for_each_online_cpu(cpu) {
18871 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18872 +               ppriv->priv = priv;
18873 +               nctx = &ppriv->nctx;
18874 +               nctx->is_cdan = 0;
18875 +               nctx->id = ppriv->rsp_fqid;
18876 +               nctx->desired_cpu = cpu;
18877 +               nctx->cb = dpaa2_caam_fqdan_cb;
18878 +
18879 +               /* Register notification callbacks */
18880 +               err = dpaa2_io_service_register(NULL, nctx);
18881 +               if (unlikely(err)) {
18882 +                       dev_err(dev, "notification register failed\n");
18883 +                       nctx->cb = NULL;
18884 +                       goto err;
18885 +               }
18886 +
18887 +               ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
18888 +                                                    dev);
18889 +               if (unlikely(!ppriv->store)) {
18890 +                       dev_err(dev, "dpaa2_io_store_create() failed\n");
18891 +                       goto err;
18892 +               }
18893 +
18894 +               if (++i == priv->num_pairs)
18895 +                       break;
18896 +       }
18897 +
18898 +       return 0;
18899 +
18900 +err:
18901 +       for_each_online_cpu(cpu) {
18902 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18903 +               if (!ppriv->nctx.cb)
18904 +                       break;
18905 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18906 +       }
18907 +
18908 +       for_each_online_cpu(cpu) {
18909 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18910 +               if (!ppriv->store)
18911 +                       break;
18912 +               dpaa2_io_store_destroy(ppriv->store);
18913 +       }
18914 +
18915 +       return err;
18916 +}
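+
+/*
+ * Unwind note: setup walks online CPUs in order and stops after num_pairs
+ * entries, so the two cleanup loops above use nctx.cb and ppriv->store as
+ * sentinels - the first CPU found with a NULL callback (or NULL store) marks
+ * the point setup never reached, and everything beyond it is untouched.
+ */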
18917 +
18918 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
18919 +{
18920 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18921 +       int i = 0, cpu;
18922 +
18923 +       for_each_online_cpu(cpu) {
18924 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18925 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18926 +               dpaa2_io_store_destroy(ppriv->store);
18927 +
18928 +               if (++i == priv->num_pairs)
18929 +                       return;
18930 +       }
18931 +}
18932 +
18933 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
18934 +{
18935 +       struct dpseci_rx_queue_cfg rx_queue_cfg;
18936 +       struct device *dev = priv->dev;
18937 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18938 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18939 +       int err = 0, i = 0, cpu;
18940 +
18941 +       /* Configure Rx queues */
18942 +       for_each_online_cpu(cpu) {
18943 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18944 +
18945 +               rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
18946 +                                      DPSECI_QUEUE_OPT_USER_CTX;
18947 +               rx_queue_cfg.order_preservation_en = 0;
18948 +               rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
18949 +               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
18950 +               /*
18951 +                * Rx priority (WQ) doesn't really matter, since we use
18952 +                * pull mode, i.e. volatile dequeues from specific FQs
18953 +                */
18954 +               rx_queue_cfg.dest_cfg.priority = 0;
18955 +               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
18956 +
18957 +               err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
18958 +                                         &rx_queue_cfg);
18959 +               if (err) {
18960 +                       dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
18961 +                               err);
18962 +                       return err;
18963 +               }
18964 +
18965 +               if (++i == priv->num_pairs)
18966 +                       break;
18967 +       }
18968 +
18969 +       return err;
18970 +}
18971 +
18972 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
18973 +{
18974 +       struct device *dev = priv->dev;
18975 +
18976 +       if (!priv->cscn_mem)
18977 +               return;
18978 +
18979 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
18980 +       kfree(priv->cscn_mem);
18981 +}
18982 +
18983 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
18984 +{
18985 +       struct device *dev = priv->dev;
18986 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18987 +
18988 +       dpaa2_dpseci_congestion_free(priv);
18989 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
18990 +}
18991 +
18992 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
18993 +                                 const struct dpaa2_fd *fd)
18994 +{
18995 +       struct caam_request *req;
18996 +       u32 fd_err;
18997 +
18998 +       if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
18999 +               dev_err(priv->dev, "Only Frame List FD format is supported!\n");
19000 +               return;
19001 +       }
19002 +
19003 +       fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
19004 +       if (unlikely(fd_err))
19005 +               dev_err(priv->dev, "FD error: %08x\n", fd_err);
19006 +
19007 +       /*
19008 +        * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
19009 +        * in FD[ERR] or FD[FRC].
19010 +        */
19011 +       req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
19012 +       dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
19013 +                        DMA_BIDIRECTIONAL);
19014 +       req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
19015 +}
19016 +
19017 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
19018 +{
19019 +       int err;
19020 +
19021 +       /* Retry while portal is busy */
19022 +       do {
19023 +               err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
19024 +                                              ppriv->store);
19025 +       } while (err == -EBUSY);
19026 +
19027 +       if (unlikely(err))
19028 +               dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
19029 +
19030 +       return err;
19031 +}
19032 +
19033 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
19034 +{
19035 +       struct dpaa2_dq *dq;
19036 +       int cleaned = 0, is_last;
19037 +
19038 +       do {
19039 +               dq = dpaa2_io_store_next(ppriv->store, &is_last);
19040 +               if (unlikely(!dq)) {
19041 +                       if (unlikely(!is_last)) {
19042 +                               dev_dbg(ppriv->priv->dev,
19043 +                                       "FQ %d returned no valid frames\n",
19044 +                                       ppriv->rsp_fqid);
19045 +                               /*
19046 +                                * MUST retry until we get some sort of
19047 +                                * valid response token (be it "empty dequeue"
19048 +                                * or a valid frame).
19049 +                                */
19050 +                               continue;
19051 +                       }
19052 +                       break;
19053 +               }
19054 +
19055 +               /* Process FD */
19056 +               dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
19057 +               cleaned++;
19058 +       } while (!is_last);
19059 +
19060 +       return cleaned;
19061 +}
19062 +
19063 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
19064 +{
19065 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19066 +       struct dpaa2_caam_priv *priv;
19067 +       int err, cleaned = 0, store_cleaned;
19068 +
19069 +       ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
19070 +       priv = ppriv->priv;
19071 +
19072 +       if (unlikely(dpaa2_caam_pull_fq(ppriv)))
19073 +               return 0;
19074 +
19075 +       do {
19076 +               store_cleaned = dpaa2_caam_store_consume(ppriv);
19077 +               cleaned += store_cleaned;
19078 +
19079 +               if (store_cleaned == 0 ||
19080 +                   cleaned > budget - DPAA2_CAAM_STORE_SIZE)
19081 +                       break;
19082 +
19083 +               /* Try to dequeue some more */
19084 +               err = dpaa2_caam_pull_fq(ppriv);
19085 +               if (unlikely(err))
19086 +                       break;
19087 +       } while (1);
19088 +
19089 +       if (cleaned < budget) {
19090 +               napi_complete_done(napi, cleaned);
19091 +               err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
19092 +               if (unlikely(err))
19093 +                       dev_err(priv->dev, "Notification rearm failed: %d\n",
19094 +                               err);
19095 +       }
19096 +
19097 +       return cleaned;
19098 +}
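+
+/*
+ * Budget arithmetic (store size assumed from caamalg_qi2.h): one volatile
+ * dequeue fills the store with at most DPAA2_CAAM_STORE_SIZE frames, so the
+ * poll loop stops re-pulling once cleaned > budget - DPAA2_CAAM_STORE_SIZE;
+ * a further pull could then overshoot the budget, while stopping there
+ * guarantees cleaned never exceeds it. NAPI is re-armed only when fewer than
+ * budget frames were consumed, per the usual napi_complete_done() contract.
+ */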
19099 +
19100 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
19101 +                                        u16 token)
19102 +{
19103 +       struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
19104 +       struct device *dev = priv->dev;
19105 +       int err;
19106 +
19107 +       /*
19108 +        * Congestion group feature supported starting with DPSECI API v5.1
19109 +        * and only when object has been created with this capability.
19110 +        */
19111 +       if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
19112 +           !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
19113 +               return 0;
19114 +
19115 +       priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
19116 +                                GFP_KERNEL | GFP_DMA);
19117 +       if (!priv->cscn_mem)
19118 +               return -ENOMEM;
19119 +
19120 +       priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
19121 +       priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
19122 +                                       DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19123 +       if (dma_mapping_error(dev, priv->cscn_dma)) {
19124 +               dev_err(dev, "Error mapping CSCN memory area\n");
19125 +               err = -ENOMEM;
19126 +               goto err_dma_map;
19127 +       }
19128 +
19129 +       cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
19130 +       cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
19131 +       cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
19132 +       cong_notif_cfg.message_ctx = (u64)priv;
19133 +       cong_notif_cfg.message_iova = priv->cscn_dma;
19134 +       cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
19135 +                                       DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
19136 +                                       DPSECI_CGN_MODE_COHERENT_WRITE;
19137 +
19138 +       err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
19139 +                                                &cong_notif_cfg);
19140 +       if (err) {
19141 +               dev_err(dev, "dpseci_set_congestion_notification failed\n");
19142 +               goto err_set_cong;
19143 +       }
19144 +
19145 +       return 0;
19146 +
19147 +err_set_cong:
19148 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19149 +err_dma_map:
19150 +       kfree(priv->cscn_mem);
19151 +
19152 +       return err;
19153 +}
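+
+/*
+ * The CSCN buffer above uses the classic manual-alignment trick: kzalloc()
+ * only guarantees kmalloc's minimum alignment, so DPAA2_CSCN_ALIGN spare
+ * bytes are over-allocated and PTR_ALIGN() rounds the start up. E.g. with a
+ * 16-byte alignment (value assumed), a raw pointer ending in ...8818 yields
+ * an aligned window at ...8820. priv->cscn_mem keeps the raw pointer so that
+ * kfree() is still handed exactly what kzalloc() returned.
+ */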
19154 +
19155 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
19156 +{
19157 +       struct device *dev = &ls_dev->dev;
19158 +       struct dpaa2_caam_priv *priv;
19159 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19160 +       int err, cpu;
19161 +       u8 i;
19162 +
19163 +       priv = dev_get_drvdata(dev);
19164 +
19165 +       priv->dev = dev;
19166 +       priv->dpsec_id = ls_dev->obj_desc.id;
19167 +
19168 +       /* Get a handle for the DPSECI this interface is associated with */
19169 +       err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
19170 +       if (err) {
19171 +               dev_err(dev, "dpsec_open() failed: %d\n", err);
19172 +               goto err_open;
19173 +       }
19174 +
19175 +       dev_info(dev, "Opened dpseci object successfully\n");
19176 +
19177 +       err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
19178 +                                    &priv->minor_ver);
19179 +       if (err) {
19180 +               dev_err(dev, "dpseci_get_api_version() failed\n");
19181 +               goto err_get_vers;
19182 +       }
19183 +
19184 +       err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
19185 +                                   &priv->dpseci_attr);
19186 +       if (err) {
19187 +               dev_err(dev, "dpseci_get_attributes() failed\n");
19188 +               goto err_get_vers;
19189 +       }
19190 +
19191 +       err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
19192 +                                 &priv->sec_attr);
19193 +       if (err) {
19194 +               dev_err(dev, "dpseci_get_sec_attr() failed\n");
19195 +               goto err_get_vers;
19196 +       }
19197 +
19198 +       err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
19199 +       if (err) {
19200 +               dev_err(dev, "setup_congestion() failed\n");
19201 +               goto err_get_vers;
19202 +       }
19203 +
19204 +       priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
19205 +                             priv->dpseci_attr.num_tx_queues);
19206 +       if (priv->num_pairs > num_online_cpus()) {
19207 +               dev_warn(dev, "%d queues won't be used\n",
19208 +                        priv->num_pairs - num_online_cpus());
19209 +               priv->num_pairs = num_online_cpus();
19210 +       }
19211 +
19212 +       for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
19213 +               err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19214 +                                         &priv->rx_queue_attr[i]);
19215 +               if (err) {
19216 +                       dev_err(dev, "dpseci_get_rx_queue() failed\n");
19217 +                       goto err_get_rx_queue;
19218 +               }
19219 +       }
19220 +
19221 +       for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
19222 +               err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19223 +                                         &priv->tx_queue_attr[i]);
19224 +               if (err) {
19225 +                       dev_err(dev, "dpseci_get_tx_queue() failed\n");
19226 +                       goto err_get_rx_queue;
19227 +               }
19228 +       }
19229 +
19230 +       i = 0;
19231 +       for_each_online_cpu(cpu) {
19232 +               dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
19233 +                        priv->rx_queue_attr[i].fqid,
19234 +                        priv->tx_queue_attr[i].fqid);
19235 +
19236 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
19237 +               ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
19238 +               ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
19239 +               ppriv->prio = i;
19240 +
19241 +               ppriv->net_dev.dev = *dev;
19242 +               INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
19243 +               netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
19244 +                              DPAA2_CAAM_NAPI_WEIGHT);
19245 +               if (++i == priv->num_pairs)
19246 +                       break;
19247 +       }
19248 +
19249 +       return 0;
19250 +
19251 +err_get_rx_queue:
19252 +       dpaa2_dpseci_congestion_free(priv);
19253 +err_get_vers:
19254 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
19255 +err_open:
19256 +       return err;
19257 +}
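+
+/*
+ * Worked example of the pairing above (numbers illustrative): a DPSECI object
+ * with 8 rx/tx queue pairs on a 4-CPU system gives num_pairs = 4; pairs 0-3
+ * are bound one per CPU (priority == pair index) and the remaining 4 pairs
+ * stay unused, as flagged by the dev_warn() earlier in this function.
+ */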
19258 +
19259 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
19260 +{
19261 +       struct device *dev = priv->dev;
19262 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19263 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19264 +       int err, i;
19265 +
19266 +       for (i = 0; i < priv->num_pairs; i++) {
19267 +               ppriv = per_cpu_ptr(priv->ppriv, i);
19268 +               napi_enable(&ppriv->napi);
19269 +       }
19270 +
19271 +       err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
19272 +       if (err) {
19273 +               dev_err(dev, "dpseci_enable() failed\n");
19274 +               return err;
19275 +       }
19276 +
19277 +       dev_info(dev, "DPSECI version %d.%d\n",
19278 +                priv->major_ver,
19279 +                priv->minor_ver);
19280 +
19281 +       return 0;
19282 +}
19283 +
19284 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
19285 +{
19286 +       struct device *dev = priv->dev;
19287 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19288 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19289 +       int i, err = 0, enabled;
19290 +
19291 +       err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
19292 +       if (err) {
19293 +               dev_err(dev, "dpseci_disable() failed\n");
19294 +               return err;
19295 +       }
19296 +
19297 +       err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
19298 +       if (err) {
19299 +               dev_err(dev, "dpseci_is_enabled() failed\n");
19300 +               return err;
19301 +       }
19302 +
19303 +       dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
19304 +
19305 +       for (i = 0; i < priv->num_pairs; i++) {
19306 +               ppriv = per_cpu_ptr(priv->ppriv, i);
19307 +               napi_disable(&ppriv->napi);
19308 +               netif_napi_del(&ppriv->napi);
19309 +       }
19310 +
19311 +       return 0;
19312 +}
19313 +
19314 +static struct list_head alg_list;
19315 +
19316 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
19317 +{
19318 +       struct device *dev;
19319 +       struct dpaa2_caam_priv *priv;
19320 +       int i, err = 0;
19321 +       bool registered = false;
19322 +
19323 +       /*
19324 +        * There is no way to get CAAM endianness - there is no direct register
19325 +        * space access and MC f/w does not provide this attribute.
19326 +        * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
19327 +        * property.
19328 +        */
19329 +       caam_little_end = true;
19330 +
19331 +       caam_imx = false;
19332 +
19333 +       dev = &dpseci_dev->dev;
19334 +
19335 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
19336 +       if (!priv)
19337 +               return -ENOMEM;
19338 +
19339 +       dev_set_drvdata(dev, priv);
19340 +
19341 +       priv->domain = iommu_get_domain_for_dev(dev);
19342 +
19343 +       qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
19344 +                                    0, SLAB_CACHE_DMA, NULL);
19345 +       if (!qi_cache) {
19346 +               dev_err(dev, "Can't allocate SEC cache\n");
19347 +               err = -ENOMEM;
19348 +               goto err_qicache;
19349 +       }
19350 +
19351 +       err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
19352 +       if (err) {
19353 +               dev_err(dev, "dma_set_mask_and_coherent() failed\n");
19354 +               goto err_dma_mask;
19355 +       }
19356 +
19357 +       /* Obtain a MC portal */
19358 +       err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
19359 +       if (err) {
19360 +               dev_err(dev, "MC portal allocation failed\n");
19361 +               goto err_dma_mask;
19362 +       }
19363 +
19364 +       priv->ppriv = alloc_percpu(*priv->ppriv);
19365 +       if (!priv->ppriv) {
19366 +               dev_err(dev, "alloc_percpu() failed\n");
      +               err = -ENOMEM;
19367 +               goto err_alloc_ppriv;
19368 +       }
19369 +
19370 +       /* DPSECI initialization */
19371 +       err = dpaa2_dpseci_setup(dpseci_dev);
19372 +       if (err < 0) {
19373 +               dev_err(dev, "dpaa2_dpseci_setup() failed\n");
19374 +               goto err_dpseci_setup;
19375 +       }
19376 +
19377 +       /* DPIO */
19378 +       err = dpaa2_dpseci_dpio_setup(priv);
19379 +       if (err) {
19380 +               dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
19381 +               goto err_dpio_setup;
19382 +       }
19383 +
19384 +       /* DPSECI binding to DPIO */
19385 +       err = dpaa2_dpseci_bind(priv);
19386 +       if (err) {
19387 +               dev_err(dev, "dpaa2_dpseci_bind() failed\n");
19388 +               goto err_bind;
19389 +       }
19390 +
19391 +       /* DPSECI enable */
19392 +       err = dpaa2_dpseci_enable(priv);
19393 +       if (err) {
19394 +               dev_err(dev, "dpaa2_dpseci_enable() failed\n");
19395 +               goto err_bind;
19396 +       }
19397 +
19398 +       /* register crypto algorithms the device supports */
19399 +       INIT_LIST_HEAD(&alg_list);
19400 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
19401 +               struct caam_crypto_alg *t_alg;
19402 +               struct caam_alg_template *alg = driver_algs + i;
19403 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
19404 +
19405 +               /* Skip DES algorithms if not supported by device */
19406 +               if (!priv->sec_attr.des_acc_num &&
19407 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
19408 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
19409 +                       continue;
19410 +
19411 +               /* Skip AES algorithms if not supported by device */
19412 +               if (!priv->sec_attr.aes_acc_num &&
19413 +                   (alg_sel == OP_ALG_ALGSEL_AES))
19414 +                       continue;
19415 +
19416 +               t_alg = caam_alg_alloc(alg);
19417 +               if (IS_ERR(t_alg)) {
19418 +                       err = PTR_ERR(t_alg);
19419 +                       dev_warn(dev, "%s alg allocation failed: %d\n",
19420 +                                alg->driver_name, err);
19421 +                       continue;
19422 +               }
19423 +               t_alg->caam.dev = dev;
19424 +
19425 +               err = crypto_register_alg(&t_alg->crypto_alg);
19426 +               if (err) {
19427 +                       dev_warn(dev, "%s alg registration failed: %d\n",
19428 +                                t_alg->crypto_alg.cra_driver_name, err);
19429 +                       kfree(t_alg);
19430 +                       continue;
19431 +               }
19432 +
19433 +               list_add_tail(&t_alg->entry, &alg_list);
19434 +               registered = true;
19435 +       }
19436 +
19437 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19438 +               struct caam_aead_alg *t_alg = driver_aeads + i;
19439 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
19440 +                                OP_ALG_ALGSEL_MASK;
19441 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
19442 +                                OP_ALG_ALGSEL_MASK;
19443 +
19444 +               /* Skip DES algorithms if not supported by device */
19445 +               if (!priv->sec_attr.des_acc_num &&
19446 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
19447 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
19448 +                       continue;
19449 +
19450 +               /* Skip AES algorithms if not supported by device */
19451 +               if (!priv->sec_attr.aes_acc_num &&
19452 +                   (c1_alg_sel == OP_ALG_ALGSEL_AES))
19453 +                       continue;
19454 +
19455 +               /*
19456 +                * Skip algorithms requiring message digests
19457 +                * if MD not supported by device.
19458 +                */
19459 +               if (!priv->sec_attr.md_acc_num && c2_alg_sel)
19460 +                       continue;
19461 +
19462 +               t_alg->caam.dev = dev;
19463 +               caam_aead_alg_init(t_alg);
19464 +
19465 +               err = crypto_register_aead(&t_alg->aead);
19466 +               if (err) {
19467 +                       dev_warn(dev, "%s alg registration failed: %d\n",
19468 +                                t_alg->aead.base.cra_driver_name, err);
19469 +                       continue;
19470 +               }
19471 +
19472 +               t_alg->registered = true;
19473 +               registered = true;
19474 +       }
19475 +       if (registered)
19476 +               dev_info(dev, "algorithms registered in /proc/crypto\n");
19477 +
19478 +       return err;
19479 +
19480 +err_bind:
19481 +       dpaa2_dpseci_dpio_free(priv);
19482 +err_dpio_setup:
19483 +       dpaa2_dpseci_free(priv);
19484 +err_dpseci_setup:
19485 +       free_percpu(priv->ppriv);
19486 +err_alloc_ppriv:
19487 +       fsl_mc_portal_free(priv->mc_io);
19488 +err_dma_mask:
19489 +       kmem_cache_destroy(qi_cache);
19490 +err_qicache:
19491 +       dev_set_drvdata(dev, NULL);
19492 +
19493 +       return err;
19494 +}
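+
+/*
+ * The error ladder above unwinds in strict reverse order of initialization
+ * (enable/bind -> dpio -> dpseci setup -> percpu -> MC portal -> cache); the
+ * success-path teardown in dpaa2_caam_remove() follows the same LIFO order.
+ */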
19495 +
19496 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
19497 +{
19498 +       struct device *dev;
19499 +       struct dpaa2_caam_priv *priv;
19500 +       int i;
19501 +
19502 +       dev = &ls_dev->dev;
19503 +       priv = dev_get_drvdata(dev);
19504 +
19505 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19506 +               struct caam_aead_alg *t_alg = driver_aeads + i;
19507 +
19508 +               if (t_alg->registered)
19509 +                       crypto_unregister_aead(&t_alg->aead);
19510 +       }
19511 +
19512 +       if (alg_list.next) {
19513 +               struct caam_crypto_alg *t_alg, *n;
19514 +
19515 +               list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
19516 +                       crypto_unregister_alg(&t_alg->crypto_alg);
19517 +                       list_del(&t_alg->entry);
19518 +                       kfree(t_alg);
19519 +               }
19520 +       }
19521 +
19522 +       dpaa2_dpseci_disable(priv);
19523 +       dpaa2_dpseci_dpio_free(priv);
19524 +       dpaa2_dpseci_free(priv);
19525 +       free_percpu(priv->ppriv);
19526 +       fsl_mc_portal_free(priv->mc_io);
19527 +       dev_set_drvdata(dev, NULL);
19528 +       kmem_cache_destroy(qi_cache);
19529 +
19530 +       return 0;
19531 +}
19532 +
19533 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
19534 +{
19535 +       struct dpaa2_fd fd;
19536 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
19537 +       int err = 0, i, id;
19538 +
19539 +       if (IS_ERR(req))
19540 +               return PTR_ERR(req);
19541 +
19542 +       if (priv->cscn_mem) {
19543 +               dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
19544 +                                       DPAA2_CSCN_SIZE,
19545 +                                       DMA_FROM_DEVICE);
19546 +               if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
19547 +                       dev_dbg_ratelimited(dev, "Dropping request\n");
19548 +                       return -EBUSY;
19549 +               }
19550 +       }
19551 +
19552 +       dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
19553 +
19554 +       req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
19555 +                                        DMA_BIDIRECTIONAL);
19556 +       if (dma_mapping_error(dev, req->fd_flt_dma)) {
19557 +               dev_err(dev, "DMA mapping error for QI enqueue request\n");
19558 +               return -EIO;
19559 +       }
19560 +
19561 +       memset(&fd, 0, sizeof(fd));
19562 +       dpaa2_fd_set_format(&fd, dpaa2_fd_list);
19563 +       dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
19564 +       dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
19565 +       dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
19566 +
19567 +       /*
19568 +        * There is no guarantee that preemption is disabled here,
19569 +        * thus take action.
19570 +        */
19571 +       preempt_disable();
19572 +       id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
19573 +       for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
19574 +               err = dpaa2_io_service_enqueue_fq(NULL,
19575 +                                                 priv->tx_queue_attr[id].fqid,
19576 +                                                 &fd);
19577 +               if (err != -EBUSY)
19578 +                       break;
19579 +       }
19580 +       preempt_enable();
19581 +
19582 +       if (unlikely(err < 0)) {
19583 +               dev_err(dev, "Error enqueuing frame: %d\n", err);
19584 +               goto err_out;
19585 +       }
19586 +
19587 +       return -EINPROGRESS;
19588 +
19589 +err_out:
19590 +       dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
19591 +                        DMA_BIDIRECTIONAL);
19592 +       return -EIO;
19593 +}
19594 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
19595 +
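For context, a minimal caller-side sketch of how a job is built and submitted through dpaa2_caam_enqueue(); this is hypothetical and not part of the patch. It assumes an already-built, DMA-mapped flow context and pre-mapped input/output buffers, and my_done()/my_submit() are illustrative names only:

	/* Sketch: synchronous wrapper around the asynchronous enqueue. */
	static void my_done(void *ctx, u32 status)	/* hypothetical */
	{
		struct completion *done = ctx;

		if (status)
			pr_err("CAAM job failed: 0x%x\n", status);
		complete(done);
	}

	static int my_submit(struct device *dev, struct caam_flc *flc,
			     dma_addr_t in_dma, u32 in_len,
			     dma_addr_t out_dma, u32 out_len)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct caam_request *req;
		int ret;

		/* kmalloc'ed, since the driver DMA-maps req->fd_flt */
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/* fd_flt[0] describes the output, fd_flt[1] the input */
		dpaa2_fl_set_format(&req->fd_flt[0], dpaa2_fl_single);
		dpaa2_fl_set_addr(&req->fd_flt[0], out_dma);
		dpaa2_fl_set_len(&req->fd_flt[0], out_len);
		dpaa2_fl_set_format(&req->fd_flt[1], dpaa2_fl_single);
		dpaa2_fl_set_addr(&req->fd_flt[1], in_dma);
		dpaa2_fl_set_len(&req->fd_flt[1], in_len);
		dpaa2_fl_set_final(&req->fd_flt[1], true);

		req->flc = flc;
		req->cbk = my_done;
		req->ctx = &done;

		ret = dpaa2_caam_enqueue(dev, req);
		if (ret == -EINPROGRESS) {	/* -EBUSY under congestion */
			wait_for_completion(&done);
			ret = 0;
		}
		kfree(req);
		return ret;
	}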
19596 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
19597 +       {
19598 +               .vendor = FSL_MC_VENDOR_FREESCALE,
19599 +               .obj_type = "dpseci",
19600 +       },
19601 +       { .vendor = 0x0 }
19602 +};
19603 +
19604 +static struct fsl_mc_driver dpaa2_caam_driver = {
19605 +       .driver = {
19606 +               .name           = KBUILD_MODNAME,
19607 +               .owner          = THIS_MODULE,
19608 +       },
19609 +       .probe          = dpaa2_caam_probe,
19610 +       .remove         = dpaa2_caam_remove,
19611 +       .match_id_table = dpaa2_caam_match_id_table
19612 +};
19613 +
19614 +MODULE_LICENSE("Dual BSD/GPL");
19615 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
19616 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
19617 +
19618 +module_fsl_mc_driver(dpaa2_caam_driver);
19619 diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
19620 new file mode 100644
19621 index 00000000..2ba179db
19622 --- /dev/null
19623 +++ b/drivers/crypto/caam/caamalg_qi2.h
19624 @@ -0,0 +1,265 @@
19625 +/*
19626 + * Copyright 2015-2016 Freescale Semiconductor Inc.
19627 + * Copyright 2017 NXP
19628 + *
19629 + * Redistribution and use in source and binary forms, with or without
19630 + * modification, are permitted provided that the following conditions are met:
19631 + *     * Redistributions of source code must retain the above copyright
19632 + *      notice, this list of conditions and the following disclaimer.
19633 + *     * Redistributions in binary form must reproduce the above copyright
19634 + *      notice, this list of conditions and the following disclaimer in the
19635 + *      documentation and/or other materials provided with the distribution.
19636 + *     * Neither the names of the above-listed copyright holders nor the
19637 + *      names of any contributors may be used to endorse or promote products
19638 + *      derived from this software without specific prior written permission.
19639 + *
19640 + *
19641 + * ALTERNATIVELY, this software may be distributed under the terms of the
19642 + * GNU General Public License ("GPL") as published by the Free Software
19643 + * Foundation, either version 2 of that License or (at your option) any
19644 + * later version.
19645 + *
19646 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19647 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19648 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19649 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19650 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19651 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19652 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19653 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19654 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19655 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19656 + * POSSIBILITY OF SUCH DAMAGE.
19657 + */
19658 +
19659 +#ifndef _CAAMALG_QI2_H_
19660 +#define _CAAMALG_QI2_H_
19661 +
19662 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
19663 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
19664 +#include <linux/threads.h>
19665 +#include "dpseci.h"
19666 +#include "desc_constr.h"
19667 +
19668 +#define DPAA2_CAAM_STORE_SIZE  16
19669 +/* NAPI weight *must* be a multiple of the store size. */
19670 +#define DPAA2_CAAM_NAPI_WEIGHT 64
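The Rx path pulls up to DPAA2_CAAM_STORE_SIZE frames per volatile dequeue, so the NAPI budget must be a whole number of stores. A compile-time guard along these lines (a sketch, not in the patch) would enforce the invariant stated in the comment:

	/* hypothetical guard, e.g. at the top of dpaa2_caam_probe() */
	BUILD_BUG_ON(DPAA2_CAAM_NAPI_WEIGHT % DPAA2_CAAM_STORE_SIZE != 0);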
19671 +
19672 +/* The congestion entrance threshold was chosen so that on LS2088
19673 + * we support the maximum throughput for the available memory
19674 + */
19675 +#define DPAA2_SEC_CONG_ENTRY_THRESH    (128 * 1024 * 1024)
19676 +#define DPAA2_SEC_CONG_EXIT_THRESH     (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
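In concrete terms, congestion is entered once 128 MiB of frames are queued and exited at 9/10 of that, i.e. 128 MiB * 9 / 10 = 115.2 MiB; the 10% hysteresis keeps the -EBUSY backpressure in dpaa2_caam_enqueue() from toggling on every enqueue near the threshold.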
19677 +
19678 +/**
19679 + * dpaa2_caam_priv - driver private data
19680 + * @dpsec_id: DPSECI object unique ID
19681 + * @major_ver: DPSECI major version
19682 + * @minor_ver: DPSECI minor version
19683 + * @dpseci_attr: DPSECI attributes
19684 + * @sec_attr: SEC engine attributes
19685 + * @rx_queue_attr: array of Rx queue attributes
19686 + * @tx_queue_attr: array of Tx queue attributes
19687 + * @cscn_mem: pointer to memory region containing the
19688 + *     dpaa2_cscn struct; its size is larger than
19689 + *     sizeof(struct dpaa2_cscn) to accommodate alignment
19690 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
19691 + *     as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
19692 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
19693 + * @dev: device associated with the DPSECI object
19694 + * @mc_io: pointer to MC portal's I/O object
19695 + * @domain: IOMMU domain
19696 + * @ppriv: per-CPU pointers to private data
19697 + */
19698 +struct dpaa2_caam_priv {
19699 +       int dpsec_id;
19700 +
19701 +       u16 major_ver;
19702 +       u16 minor_ver;
19703 +
19704 +       struct dpseci_attr dpseci_attr;
19705 +       struct dpseci_sec_attr sec_attr;
19706 +       struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
19707 +       struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
19708 +       int num_pairs;
19709 +
19710 +       /* congestion */
19711 +       void *cscn_mem;
19712 +       void *cscn_mem_aligned;
19713 +       dma_addr_t cscn_dma;
19714 +
19715 +       struct device *dev;
19716 +       struct fsl_mc_io *mc_io;
19717 +       struct iommu_domain *domain;
19718 +
19719 +       struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
19720 +};
19721 +
19722 +/**
19723 + * dpaa2_caam_priv_per_cpu - per CPU private data
19724 + * @napi: napi structure
19725 + * @net_dev: netdev used by napi
19726 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
19727 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
19728 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
19729 + * @nctx: notification context of response FQ
19730 + * @store: where dequeued frames are stored
19731 + * @priv: backpointer to dpaa2_caam_priv
19732 + */
19733 +struct dpaa2_caam_priv_per_cpu {
19734 +       struct napi_struct napi;
19735 +       struct net_device net_dev;
19736 +       int req_fqid;
19737 +       int rsp_fqid;
19738 +       int prio;
19739 +       struct dpaa2_io_notification_ctx nctx;
19740 +       struct dpaa2_io_store *store;
19741 +       struct dpaa2_caam_priv *priv;
19742 +};
19743 +
19744 +/*
19745 + * The CAAM QI hardware constructs a job descriptor which points
19746 + * to the shared descriptor (as referenced by context_a of the FQ).
19747 + * When the job descriptor is executed by the deco, the whole job
19748 + * descriptor together with the shared descriptor is loaded into
19749 + * the deco buffer, which is 64 words long (each word 32 bits).
19750 + *
19751 + * The job descriptor constructed by QI hardware has layout:
19752 + *
19753 + *     HEADER          (1 word)
19754 + *     Shdesc ptr      (1 or 2 words)
19755 + *     SEQ_OUT_PTR     (1 word)
19756 + *     Out ptr         (1 or 2 words)
19757 + *     Out length      (1 word)
19758 + *     SEQ_IN_PTR      (1 word)
19759 + *     In ptr          (1 or 2 words)
19760 + *     In length       (1 word)
19761 + *
19762 + * The shdesc ptr is used to fetch shared descriptor contents
19763 + * into deco buffer.
19764 + *
19765 + * Apart from the shdesc contents, the total number of words that
19766 + * get loaded into the deco buffer is 8 or 11. The remaining words
19767 + * in the deco buffer can be used for storing the shared descriptor.
19768 + */
19769 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
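Plugging in the usual desc_constr.h values (a sketch assuming CAAM_CMD_SZ = 4 bytes, CAAM_DESC_BYTES_MAX = 64 * CAAM_CMD_SZ = 256 bytes, and DESC_JOB_IO_LEN = 5 commands + 3 pointers = 44 bytes with 64-bit pointers):

	/*
	 * MAX_SDLEN = (256 - 44) / 4 = 53 words (64-bit pointers)
	 *           = (256 - 32) / 4 = 56 words (32-bit pointers)
	 * i.e. the 64-word deco buffer minus the 11- or 8-word
	 * QI-built job descriptor described above.
	 */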
19770 +
19771 +/* Length of a single buffer in the QI driver memory cache */
19772 +#define CAAM_QI_MEMCACHE_SIZE  512
19773 +
19774 +/*
19775 + * aead_edesc - s/w-extended aead descriptor
19776 + * @src_nents: number of segments in input scatterlist
19777 + * @dst_nents: number of segments in output scatterlist
19778 + * @iv_dma: dma address of iv for checking continuity and link table
19779 + * @qm_sg_bytes: length of dma mapped h/w link table
19780 + * @qm_sg_dma: bus physical mapped address of h/w link table
19781 + * @assoclen_dma: bus physical mapped address of req->assoclen
19782 + * @sgt: the h/w link table
19783 + */
19784 +struct aead_edesc {
19785 +       int src_nents;
19786 +       int dst_nents;
19787 +       dma_addr_t iv_dma;
19788 +       int qm_sg_bytes;
19789 +       dma_addr_t qm_sg_dma;
19790 +       dma_addr_t assoclen_dma;
19791 +#define CAAM_QI_MAX_AEAD_SG                                            \
19792 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
19793 +        sizeof(struct dpaa2_sg_entry))
19794 +       struct dpaa2_sg_entry sgt[0];
19795 +};
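With a 64-bit dma_addr_t and no unusual padding (an estimate, not guaranteed by the C standard), offsetof(struct aead_edesc, sgt) is 40 bytes and sizeof(struct dpaa2_sg_entry) is 16, so CAAM_QI_MAX_AEAD_SG = (512 - 40) / 16 = 29 hardware S/G entries per memcache buffer.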
19796 +
19797 +/*
19798 + * tls_edesc - s/w-extended tls descriptor
19799 + * @src_nents: number of segments in input scatterlist
19800 + * @dst_nents: number of segments in output scatterlist
19801 + * @iv_dma: dma address of iv for checking continuity and link table
19802 + * @qm_sg_bytes: length of dma mapped h/w link table
19803 + * @qm_sg_dma: bus physical mapped address of h/w link table
19804 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
19805 + * @dst: pointer to the output scatterlist, useful for unmapping
19806 + * @sgt: the h/w link table
19807 + */
19808 +struct tls_edesc {
19809 +       int src_nents;
19810 +       int dst_nents;
19811 +       dma_addr_t iv_dma;
19812 +       int qm_sg_bytes;
19813 +       dma_addr_t qm_sg_dma;
19814 +       struct scatterlist tmp[2];
19815 +       struct scatterlist *dst;
19816 +       struct dpaa2_sg_entry sgt[0];
19817 +};
19818 +
19819 +/*
19820 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
19821 + * @src_nents: number of segments in input scatterlist
19822 + * @dst_nents: number of segments in output scatterlist
19823 + * @iv_dma: dma address of iv for checking continuity and link table
19824 + * @qm_sg_bytes: length of dma mapped qm_sg space
19825 + * @qm_sg_dma: I/O virtual address of h/w link table
19826 + * @sgt: the h/w link table
19827 + */
19828 +struct ablkcipher_edesc {
19829 +       int src_nents;
19830 +       int dst_nents;
19831 +       dma_addr_t iv_dma;
19832 +       int qm_sg_bytes;
19833 +       dma_addr_t qm_sg_dma;
19834 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
19835 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
19836 +        sizeof(struct dpaa2_sg_entry))
19837 +       struct dpaa2_sg_entry sgt[0];
19838 +};
19839 +
19840 +/**
19841 + * caam_flc - Flow Context (FLC)
19842 + * @flc: Flow Context options
19843 + * @sh_desc: Shared Descriptor
19844 + * @flc_dma: DMA address of the Flow Context
19845 + */
19846 +struct caam_flc {
19847 +       u32 flc[16];
19848 +       u32 sh_desc[MAX_SDLEN];
19849 +       dma_addr_t flc_dma;
19850 +} ____cacheline_aligned;
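Before a request can reference a flow context, the shared descriptor is built into sh_desc, its length is recorded in word 1 of flc (the SDL field), and the structure is DMA-mapped. A condensed sketch of the pattern this patch uses elsewhere (descriptor construction elided; the per-context FLC array is an assumption):

	struct caam_flc *flc = &ctx->flc[ENCRYPT];
	u32 *desc = flc->sh_desc;

	/* build the shared descriptor here, e.g. with a cnstr_shdsc_*() helper */
	flc->flc[1] = desc_len(desc);		/* SDL */
	flc->flc_dma = dma_map_single(dev, flc,
				      sizeof(flc->flc) + desc_bytes(desc),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, flc->flc_dma))
		return -ENOMEM;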
19851 +
19852 +enum optype {
19853 +       ENCRYPT = 0,
19854 +       DECRYPT,
19855 +       GIVENCRYPT,
19856 +       NUM_OP
19857 +};
19858 +
19859 +/**
19860 + * caam_request - the request structure the driver application should fill when
19861 + *                submitting a job to the driver.
19862 + * @fd_flt: Frame list table defining input and output
19863 + *          fd_flt[0] - FLE pointing to output buffer
19864 + *          fd_flt[1] - FLE pointing to input buffer
19865 + * @fd_flt_dma: DMA address for the frame list table
19866 + * @flc: Flow Context
19867 + * @op_type: operation type
19868 + * @cbk: Callback function to invoke when job is completed
19869 + * @ctx: arbitrary context attached to the request by the application
19870 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
19871 + */
19872 +struct caam_request {
19873 +       struct dpaa2_fl_entry fd_flt[2];
19874 +       dma_addr_t fd_flt_dma;
19875 +       struct caam_flc *flc;
19876 +       enum optype op_type;
19877 +       void (*cbk)(void *ctx, u32 err);
19878 +       void *ctx;
19879 +       void *edesc;
19880 +};
19881 +
19882 +/**
19883 + * dpaa2_caam_enqueue() - enqueue a crypto request
19884 + * @dev: device associated with the DPSECI object
19885 + * @req: pointer to caam_request
19886 + */
19887 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
19888 +
19889 +#endif /* _CAAMALG_QI2_H_ */
19890 diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
19891 index 631337c2..698580b6 100644
19892 --- a/drivers/crypto/caam/caamhash.c
19893 +++ b/drivers/crypto/caam/caamhash.c
19894 @@ -72,7 +72,7 @@
19895  #define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
19896  
19897  /* length of descriptors text */
19898 -#define DESC_AHASH_BASE                        (4 * CAAM_CMD_SZ)
19899 +#define DESC_AHASH_BASE                        (3 * CAAM_CMD_SZ)
19900  #define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
19901  #define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
19902  #define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
19903 @@ -103,20 +103,14 @@ struct caam_hash_ctx {
19904         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19905         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19906         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19907 -       u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19908         dma_addr_t sh_desc_update_dma ____cacheline_aligned;
19909         dma_addr_t sh_desc_update_first_dma;
19910         dma_addr_t sh_desc_fin_dma;
19911         dma_addr_t sh_desc_digest_dma;
19912 -       dma_addr_t sh_desc_finup_dma;
19913         struct device *jrdev;
19914 -       u32 alg_type;
19915 -       u32 alg_op;
19916         u8 key[CAAM_MAX_HASH_KEY_SIZE];
19917 -       dma_addr_t key_dma;
19918         int ctx_len;
19919 -       unsigned int split_key_len;
19920 -       unsigned int split_key_pad_len;
19921 +       struct alginfo adata;
19922  };
19923  
19924  /* ahash state */
19925 @@ -143,6 +137,31 @@ struct caam_export_state {
19926         int (*finup)(struct ahash_request *req);
19927  };
19928  
19929 +static inline void switch_buf(struct caam_hash_state *state)
19930 +{
19931 +       state->current_buf ^= 1;
19932 +}
19933 +
19934 +static inline u8 *current_buf(struct caam_hash_state *state)
19935 +{
19936 +       return state->current_buf ? state->buf_1 : state->buf_0;
19937 +}
19938 +
19939 +static inline u8 *alt_buf(struct caam_hash_state *state)
19940 +{
19941 +       return state->current_buf ? state->buf_0 : state->buf_1;
19942 +}
19943 +
19944 +static inline int *current_buflen(struct caam_hash_state *state)
19945 +{
19946 +       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19947 +}
19948 +
19949 +static inline int *alt_buflen(struct caam_hash_state *state)
19950 +{
19951 +       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19952 +}
19953 +
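These helpers implement ping-pong buffering: data that cannot be hashed yet is staged in one of two small buffers, and switch_buf() flips which buffer is current once a job consuming the old one has been queued. In sketch form (src_off and rem are illustrative values):

	/* stash the unhashable tail for the next update, then flip */
	scatterwalk_map_and_copy(alt_buf(state), req->src, src_off, rem, 0);
	switch_buf(state);	/* the saved tail is now current_buf() */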
19954  /* Common job descriptor seq in/out ptr routines */
19955  
19956  /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
19957 @@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
19958         return dst_dma;
19959  }
19960  
19961 -/* Map current buffer in state and put it in link table */
19962 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
19963 -                                           struct sec4_sg_entry *sec4_sg,
19964 -                                           u8 *buf, int buflen)
19965 +/* Map current buffer in state (if length > 0) and put it in link table */
19966 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
19967 +                                    struct sec4_sg_entry *sec4_sg,
19968 +                                    struct caam_hash_state *state)
19969  {
19970 -       dma_addr_t buf_dma;
19971 +       int buflen = *current_buflen(state);
19972  
19973 -       buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
19974 -       dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
19975 +       if (!buflen)
19976 +               return 0;
19977  
19978 -       return buf_dma;
19979 -}
19980 +       state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
19981 +                                       DMA_TO_DEVICE);
19982 +       if (dma_mapping_error(jrdev, state->buf_dma)) {
19983 +               dev_err(jrdev, "unable to map buf\n");
19984 +               state->buf_dma = 0;
19985 +               return -ENOMEM;
19986 +       }
19987  
19988 -/*
19989 - * Only put buffer in link table if it contains data, which is possible,
19990 - * since a buffer has previously been used, and needs to be unmapped,
19991 - */
19992 -static inline dma_addr_t
19993 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
19994 -                      u8 *buf, dma_addr_t buf_dma, int buflen,
19995 -                      int last_buflen)
19996 -{
19997 -       if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
19998 -               dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
19999 -       if (buflen)
20000 -               buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
20001 -       else
20002 -               buf_dma = 0;
20003 -
20004 -       return buf_dma;
20005 +       dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
20006 +
20007 +       return 0;
20008  }
20009  
20010  /* Map state->caam_ctx, and add it to link table */
20011 @@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
20012         return 0;
20013  }
20014  
20015 -/* Common shared descriptor commands */
20016 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
20017 -{
20018 -       append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
20019 -                         ctx->split_key_len, CLASS_2 |
20020 -                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
20021 -}
20022 -
20023 -/* Append key if it has been set */
20024 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
20025 -{
20026 -       u32 *key_jump_cmd;
20027 -
20028 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
20029 -
20030 -       if (ctx->split_key_len) {
20031 -               /* Skip if already shared */
20032 -               key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20033 -                                          JUMP_COND_SHRD);
20034 -
20035 -               append_key_ahash(desc, ctx);
20036 -
20037 -               set_jump_tgt_here(desc, key_jump_cmd);
20038 -       }
20039 -
20040 -       /* Propagate errors from shared to job descriptor */
20041 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
20042 -}
20043 -
20044  /*
20045 - * For ahash read data from seqin following state->caam_ctx,
20046 - * and write resulting class2 context to seqout, which may be state->caam_ctx
20047 - * or req->result
20048 + * For ahash update, final and finup (import_ctx = true)
20049 + *     import context, read and write to seqout
20050 + * For ahash update_first and digest (import_ctx = false)
20051 + *     read and write to seqout
20052   */
20053 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
20054 +static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
20055 +                                    struct caam_hash_ctx *ctx, bool import_ctx)
20056  {
20057 -       /* Calculate remaining bytes to read */
20058 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20059 -
20060 -       /* Read remaining bytes */
20061 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20062 -                            FIFOLD_TYPE_MSG | KEY_VLF);
20063 +       u32 op = ctx->adata.algtype;
20064 +       u32 *skip_key_load;
20065  
20066 -       /* Store class2 context bytes */
20067 -       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20068 -                        LDST_SRCDST_BYTE_CONTEXT);
20069 -}
20070 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
20071  
20072 -/*
20073 - * For ahash update, final and finup, import context, read and write to seqout
20074 - */
20075 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
20076 -                                        int digestsize,
20077 -                                        struct caam_hash_ctx *ctx)
20078 -{
20079 -       init_sh_desc_key_ahash(desc, ctx);
20080 +       /* Append key if it has been set; ahash update excluded */
20081 +       if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
20082 +               /* Skip key loading if already shared */
20083 +               skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20084 +                                           JUMP_COND_SHRD);
20085  
20086 -       /* Import context from software */
20087 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20088 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
20089 +               append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
20090 +                                 ctx->adata.keylen, CLASS_2 |
20091 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
20092  
20093 -       /* Class 2 operation */
20094 -       append_operation(desc, op | state | OP_ALG_ENCRYPT);
20095 +               set_jump_tgt_here(desc, skip_key_load);
20096  
20097 -       /*
20098 -        * Load from buf and/or src and write to req->result or state->context
20099 -        */
20100 -       ahash_append_load_str(desc, digestsize);
20101 -}
20102 +               op |= OP_ALG_AAI_HMAC_PRECOMP;
20103 +       }
20104  
20105 -/* For ahash firsts and digest, read and write to seqout */
20106 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
20107 -                                    int digestsize, struct caam_hash_ctx *ctx)
20108 -{
20109 -       init_sh_desc_key_ahash(desc, ctx);
20110 +       /* If needed, import context from software */
20111 +       if (import_ctx)
20112 +               append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
20113 +                               LDST_SRCDST_BYTE_CONTEXT);
20114  
20115         /* Class 2 operation */
20116         append_operation(desc, op | state | OP_ALG_ENCRYPT);
20117  
20118         /*
20119          * Load from buf and/or src and write to req->result or state->context
20120 +        * Calculate remaining bytes to read
20121          */
20122 -       ahash_append_load_str(desc, digestsize);
20123 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20124 +       /* Read remaining bytes */
20125 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20126 +                            FIFOLD_TYPE_MSG | KEY_VLF);
20127 +       /* Store class2 context bytes */
20128 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20129 +                        LDST_SRCDST_BYTE_CONTEXT);
20130  }
20131  
20132  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20133 @@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20134         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20135         int digestsize = crypto_ahash_digestsize(ahash);
20136         struct device *jrdev = ctx->jrdev;
20137 -       u32 have_key = 0;
20138         u32 *desc;
20139  
20140 -       if (ctx->split_key_len)
20141 -               have_key = OP_ALG_AAI_HMAC_PRECOMP;
20142 -
20143         /* ahash_update shared descriptor */
20144         desc = ctx->sh_desc_update;
20145 -
20146 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
20147 -
20148 -       /* Import context from software */
20149 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20150 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
20151 -
20152 -       /* Class 2 operation */
20153 -       append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
20154 -                        OP_ALG_ENCRYPT);
20155 -
20156 -       /* Load data and write to result or context */
20157 -       ahash_append_load_str(desc, ctx->ctx_len);
20158 -
20159 -       ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20160 -                                                DMA_TO_DEVICE);
20161 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
20162 -               dev_err(jrdev, "unable to map shared descriptor\n");
20163 -               return -ENOMEM;
20164 -       }
20165 +       ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
20166 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
20167 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20168  #ifdef DEBUG
20169         print_hex_dump(KERN_ERR,
20170                        "ahash update shdesc@"__stringify(__LINE__)": ",
20171 @@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20172  
20173         /* ahash_update_first shared descriptor */
20174         desc = ctx->sh_desc_update_first;
20175 -
20176 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
20177 -                         ctx->ctx_len, ctx);
20178 -
20179 -       ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
20180 -                                                      desc_bytes(desc),
20181 -                                                      DMA_TO_DEVICE);
20182 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
20183 -               dev_err(jrdev, "unable to map shared descriptor\n");
20184 -               return -ENOMEM;
20185 -       }
20186 +       ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
20187 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
20188 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20189  #ifdef DEBUG
20190         print_hex_dump(KERN_ERR,
20191                        "ahash update first shdesc@"__stringify(__LINE__)": ",
20192 @@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20193  
20194         /* ahash_final shared descriptor */
20195         desc = ctx->sh_desc_fin;
20196 -
20197 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20198 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
20199 -
20200 -       ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20201 -                                             DMA_TO_DEVICE);
20202 -       if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
20203 -               dev_err(jrdev, "unable to map shared descriptor\n");
20204 -               return -ENOMEM;
20205 -       }
20206 +       ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
20207 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
20208 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20209  #ifdef DEBUG
20210         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
20211                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
20212                        desc_bytes(desc), 1);
20213  #endif
20214  
20215 -       /* ahash_finup shared descriptor */
20216 -       desc = ctx->sh_desc_finup;
20217 -
20218 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20219 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
20220 -
20221 -       ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20222 -                                               DMA_TO_DEVICE);
20223 -       if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
20224 -               dev_err(jrdev, "unable to map shared descriptor\n");
20225 -               return -ENOMEM;
20226 -       }
20227 -#ifdef DEBUG
20228 -       print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
20229 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
20230 -                      desc_bytes(desc), 1);
20231 -#endif
20232 -
20233         /* ahash_digest shared descriptor */
20234         desc = ctx->sh_desc_digest;
20235 -
20236 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
20237 -                         digestsize, ctx);
20238 -
20239 -       ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
20240 -                                                desc_bytes(desc),
20241 -                                                DMA_TO_DEVICE);
20242 -       if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
20243 -               dev_err(jrdev, "unable to map shared descriptor\n");
20244 -               return -ENOMEM;
20245 -       }
20246 +       ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
20247 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
20248 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20249  #ifdef DEBUG
20250         print_hex_dump(KERN_ERR,
20251                        "ahash digest shdesc@"__stringify(__LINE__)": ",
20252 @@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20253         return 0;
20254  }
20255  
20256 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20257 -                             u32 keylen)
20258 -{
20259 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
20260 -                              ctx->split_key_pad_len, key_in, keylen,
20261 -                              ctx->alg_op);
20262 -}
20263 -
20264  /* Digest hash size if it is too large */
20265  static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20266                            u32 *keylen, u8 *key_out, u32 digestsize)
20267 @@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20268         }
20269  
20270         /* Job descriptor to perform unkeyed hash on key_in */
20271 -       append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
20272 +       append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
20273                          OP_ALG_AS_INITFINAL);
20274         append_seq_in_ptr(desc, src_dma, *keylen, 0);
20275         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
20276 @@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20277  static int ahash_setkey(struct crypto_ahash *ahash,
20278                         const u8 *key, unsigned int keylen)
20279  {
20280 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
20281 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
20282         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20283 -       struct device *jrdev = ctx->jrdev;
20284         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
20285         int digestsize = crypto_ahash_digestsize(ahash);
20286         int ret;
20287 @@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ahash *ahash,
20288                 key = hashed_key;
20289         }
20290  
20291 -       /* Pick class 2 key length from algorithm submask */
20292 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20293 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
20294 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
20295 -
20296 -#ifdef DEBUG
20297 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
20298 -              ctx->split_key_len, ctx->split_key_pad_len);
20299 -       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
20300 -                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
20301 -#endif
20302 -
20303 -       ret = gen_split_hash_key(ctx, key, keylen);
20304 +       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
20305 +                           CAAM_MAX_HASH_KEY_SIZE);
20306         if (ret)
20307                 goto bad_free_key;
20308  
20309 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
20310 -                                     DMA_TO_DEVICE);
20311 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
20312 -               dev_err(jrdev, "unable to map key i/o memory\n");
20313 -               ret = -ENOMEM;
20314 -               goto error_free_key;
20315 -       }
20316  #ifdef DEBUG
20317         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
20318                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
20319 -                      ctx->split_key_pad_len, 1);
20320 +                      ctx->adata.keylen_pad, 1);
20321  #endif
20322  
20323 -       ret = ahash_set_sh_desc(ahash);
20324 -       if (ret) {
20325 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
20326 -                                DMA_TO_DEVICE);
20327 -       }
20328 - error_free_key:
20329         kfree(hashed_key);
20330 -       return ret;
20331 +       return ahash_set_sh_desc(ahash);
20332   bad_free_key:
20333         kfree(hashed_key);
20334         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
20335 @@ -604,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
20336                         struct ahash_edesc *edesc,
20337                         struct ahash_request *req, int dst_len)
20338  {
20339 +       struct caam_hash_state *state = ahash_request_ctx(req);
20340 +
20341         if (edesc->src_nents)
20342                 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
20343         if (edesc->dst_dma)
20344 @@ -612,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
20345         if (edesc->sec4_sg_bytes)
20346                 dma_unmap_single(dev, edesc->sec4_sg_dma,
20347                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
20348 +
20349 +       if (state->buf_dma) {
20350 +               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
20351 +                                DMA_TO_DEVICE);
20352 +               state->buf_dma = 0;
20353 +       }
20354  }
20355  
20356  static inline void ahash_unmap_ctx(struct device *dev,
20357 @@ -643,8 +529,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
20358         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20359  #endif
20360  
20361 -       edesc = (struct ahash_edesc *)((char *)desc -
20362 -                offsetof(struct ahash_edesc, hw_desc));
20363 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20364         if (err)
20365                 caam_jr_strstatus(jrdev, err);
20366  
20367 @@ -671,19 +556,19 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
20368         struct ahash_edesc *edesc;
20369         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20370         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20371 -#ifdef DEBUG
20372         struct caam_hash_state *state = ahash_request_ctx(req);
20373 +#ifdef DEBUG
20374         int digestsize = crypto_ahash_digestsize(ahash);
20375  
20376         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20377  #endif
20378  
20379 -       edesc = (struct ahash_edesc *)((char *)desc -
20380 -                offsetof(struct ahash_edesc, hw_desc));
20381 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20382         if (err)
20383                 caam_jr_strstatus(jrdev, err);
20384  
20385         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
20386 +       switch_buf(state);
20387         kfree(edesc);
20388  
20389  #ifdef DEBUG
20390 @@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
20391         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20392  #endif
20393  
20394 -       edesc = (struct ahash_edesc *)((char *)desc -
20395 -                offsetof(struct ahash_edesc, hw_desc));
20396 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20397         if (err)
20398                 caam_jr_strstatus(jrdev, err);
20399  
20400 @@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
20401         struct ahash_edesc *edesc;
20402         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20403         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20404 -#ifdef DEBUG
20405         struct caam_hash_state *state = ahash_request_ctx(req);
20406 +#ifdef DEBUG
20407         int digestsize = crypto_ahash_digestsize(ahash);
20408  
20409         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20410  #endif
20411  
20412 -       edesc = (struct ahash_edesc *)((char *)desc -
20413 -                offsetof(struct ahash_edesc, hw_desc));
20414 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20415         if (err)
20416                 caam_jr_strstatus(jrdev, err);
20417  
20418         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
20419 +       switch_buf(state);
20420         kfree(edesc);
20421  
20422  #ifdef DEBUG
20423 @@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash_request *req)
20424         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20425         struct caam_hash_state *state = ahash_request_ctx(req);
20426         struct device *jrdev = ctx->jrdev;
20427 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20428 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20429 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20430 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20431 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20432 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
20433 -                          &state->buflen_1, last_buflen;
20434 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20435 +                      GFP_KERNEL : GFP_ATOMIC;
20436 +       u8 *buf = current_buf(state);
20437 +       int *buflen = current_buflen(state);
20438 +       u8 *next_buf = alt_buf(state);
20439 +       int *next_buflen = alt_buflen(state), last_buflen;
20440         int in_len = *buflen + req->nbytes, to_hash;
20441         u32 *desc;
20442         int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
20443 @@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
20444                 if (ret)
20445                         goto unmap_ctx;
20446  
20447 -               state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
20448 -                                                       edesc->sec4_sg + 1,
20449 -                                                       buf, state->buf_dma,
20450 -                                                       *buflen, last_buflen);
20451 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20452 +               if (ret)
20453 +                       goto unmap_ctx;
20454  
20455                 if (mapped_nents) {
20456                         sg_to_sec4_sg_last(req->src, mapped_nents,
20457 @@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash_request *req)
20458                                                          to_hash - *buflen,
20459                                                          *next_buflen, 0);
20460                 } else {
20461 -                       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20462 -                               cpu_to_caam32(SEC4_SG_LEN_FIN);
20463 +                       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
20464 +                                           1);
20465                 }
20466  
20467 -               state->current_buf = !state->current_buf;
20468 -
20469                 desc = edesc->hw_desc;
20470  
20471                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20472 @@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_request *req)
20473         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20474         struct caam_hash_state *state = ahash_request_ctx(req);
20475         struct device *jrdev = ctx->jrdev;
20476 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20477 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20478 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20479 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20480 -       int last_buflen = state->current_buf ? state->buflen_0 :
20481 -                         state->buflen_1;
20482 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20483 +                      GFP_KERNEL : GFP_ATOMIC;
20484 +       int buflen = *current_buflen(state);
20485         u32 *desc;
20486         int sec4_sg_bytes, sec4_sg_src_index;
20487         int digestsize = crypto_ahash_digestsize(ahash);
20488 @@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_request *req)
20489         if (ret)
20490                 goto unmap_ctx;
20491  
20492 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20493 -                                               buf, state->buf_dma, buflen,
20494 -                                               last_buflen);
20495 -       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20496 -               cpu_to_caam32(SEC4_SG_LEN_FIN);
20497 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20498 +       if (ret)
20499 +               goto unmap_ctx;
20500 +
20501 +       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
20502  
20503         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20504                                             sec4_sg_bytes, DMA_TO_DEVICE);
20505 @@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
20506         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20507         struct caam_hash_state *state = ahash_request_ctx(req);
20508         struct device *jrdev = ctx->jrdev;
20509 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20510 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20511 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20512 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20513 -       int last_buflen = state->current_buf ? state->buflen_0 :
20514 -                         state->buflen_1;
20515 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20516 +                      GFP_KERNEL : GFP_ATOMIC;
20517 +       int buflen = *current_buflen(state);
20518         u32 *desc;
20519         int sec4_sg_src_index;
20520         int src_nents, mapped_nents;
20521 @@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
20522  
20523         /* allocate space for base edesc and hw desc commands, link tables */
20524         edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
20525 -                                 ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
20526 +                                 ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
20527                                   flags);
20528         if (!edesc) {
20529                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
20530 @@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
20531         if (ret)
20532                 goto unmap_ctx;
20533  
20534 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20535 -                                               buf, state->buf_dma, buflen,
20536 -                                               last_buflen);
20537 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20538 +       if (ret)
20539 +               goto unmap_ctx;
20540  
20541         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
20542                                   sec4_sg_src_index, ctx->ctx_len + buflen,
20543 @@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_request *req)
20544  {
20545         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20546         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20547 +       struct caam_hash_state *state = ahash_request_ctx(req);
20548         struct device *jrdev = ctx->jrdev;
20549 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20550 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20551 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20552 +                      GFP_KERNEL : GFP_ATOMIC;
20553         u32 *desc;
20554         int digestsize = crypto_ahash_digestsize(ahash);
20555         int src_nents, mapped_nents;
20556         struct ahash_edesc *edesc;
20557         int ret;
20558  
20559 +       state->buf_dma = 0;
20560 +
20561         src_nents = sg_nents_for_len(req->src, req->nbytes);
20562         if (src_nents < 0) {
20563                 dev_err(jrdev, "Invalid number of src SG.\n");
20564 @@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct ahash_request *req)
20565         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20566         struct caam_hash_state *state = ahash_request_ctx(req);
20567         struct device *jrdev = ctx->jrdev;
20568 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20569 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20570 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20571 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20572 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20573 +                      GFP_KERNEL : GFP_ATOMIC;
20574 +       u8 *buf = current_buf(state);
20575 +       int buflen = *current_buflen(state);
20576         u32 *desc;
20577         int digestsize = crypto_ahash_digestsize(ahash);
20578         struct ahash_edesc *edesc;
20579 @@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20580         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20581         struct caam_hash_state *state = ahash_request_ctx(req);
20582         struct device *jrdev = ctx->jrdev;
20583 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20584 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20585 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20586 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20587 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20588 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
20589 -                          &state->buflen_1;
20590 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20591 +                      GFP_KERNEL : GFP_ATOMIC;
20592 +       u8 *buf = current_buf(state);
20593 +       int *buflen = current_buflen(state);
20594 +       u8 *next_buf = alt_buf(state);
20595 +       int *next_buflen = alt_buflen(state);
20596         int in_len = *buflen + req->nbytes, to_hash;
20597         int sec4_sg_bytes, src_nents, mapped_nents;
20598         struct ahash_edesc *edesc;
20599 @@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20600                 edesc->sec4_sg_bytes = sec4_sg_bytes;
20601                 edesc->dst_dma = 0;
20602  
20603 -               state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
20604 -                                                   buf, *buflen);
20605 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20606 +               if (ret)
20607 +                       goto unmap_ctx;
20608 +
20609                 sg_to_sec4_sg_last(req->src, mapped_nents,
20610                                    edesc->sec4_sg + 1, 0);
20611  
20612 @@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
20613                                                  *next_buflen, 0);
20614                 }
20615  
20616 -               state->current_buf = !state->current_buf;
20617 -
20618                 desc = edesc->hw_desc;
20619  
20620                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20621 @@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
20622         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20623         struct caam_hash_state *state = ahash_request_ctx(req);
20624         struct device *jrdev = ctx->jrdev;
20625 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20626 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20627 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20628 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20629 -       int last_buflen = state->current_buf ? state->buflen_0 :
20630 -                         state->buflen_1;
20631 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20632 +                      GFP_KERNEL : GFP_ATOMIC;
20633 +       int buflen = *current_buflen(state);
20634         u32 *desc;
20635         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
20636         int digestsize = crypto_ahash_digestsize(ahash);
20637 @@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
20638         edesc->src_nents = src_nents;
20639         edesc->sec4_sg_bytes = sec4_sg_bytes;
20640  
20641 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
20642 -                                               state->buf_dma, buflen,
20643 -                                               last_buflen);
20644 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20645 +       if (ret)
20646 +               goto unmap;
20647  
20648         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
20649                                   req->nbytes);
20650 @@ -1496,11 +1369,10 @@ static int ahash_update_first(struct ahash_request *req)
20651         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20652         struct caam_hash_state *state = ahash_request_ctx(req);
20653         struct device *jrdev = ctx->jrdev;
20654 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20655 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20656 -       u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
20657 -       int *next_buflen = state->current_buf ?
20658 -               &state->buflen_1 : &state->buflen_0;
20659 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20660 +                      GFP_KERNEL : GFP_ATOMIC;
20661 +       u8 *next_buf = alt_buf(state);
20662 +       int *next_buflen = alt_buflen(state);
20663         int to_hash;
20664         u32 *desc;
20665         int src_nents, mapped_nents;
20666 @@ -1582,6 +1454,7 @@ static int ahash_update_first(struct ahash_request *req)
20667                 state->final = ahash_final_no_ctx;
20668                 scatterwalk_map_and_copy(next_buf, req->src, 0,
20669                                          req->nbytes, 0);
20670 +               switch_buf(state);
20671         }
20672  #ifdef DEBUG
20673         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
20674 @@ -1688,7 +1561,6 @@ struct caam_hash_template {
20675         unsigned int blocksize;
20676         struct ahash_alg template_ahash;
20677         u32 alg_type;
20678 -       u32 alg_op;
20679  };
20680  
20681  /* ahash descriptors */
20682 @@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_hash[] = {
20683                         },
20684                 },
20685                 .alg_type = OP_ALG_ALGSEL_SHA1,
20686 -               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
20687         }, {
20688                 .name = "sha224",
20689                 .driver_name = "sha224-caam",
20690 @@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_hash[] = {
20691                         },
20692                 },
20693                 .alg_type = OP_ALG_ALGSEL_SHA224,
20694 -               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
20695         }, {
20696                 .name = "sha256",
20697                 .driver_name = "sha256-caam",
20698 @@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_hash[] = {
20699                         },
20700                 },
20701                 .alg_type = OP_ALG_ALGSEL_SHA256,
20702 -               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
20703         }, {
20704                 .name = "sha384",
20705                 .driver_name = "sha384-caam",
20706 @@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_hash[] = {
20707                         },
20708                 },
20709                 .alg_type = OP_ALG_ALGSEL_SHA384,
20710 -               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
20711         }, {
20712                 .name = "sha512",
20713                 .driver_name = "sha512-caam",
20714 @@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_hash[] = {
20715                         },
20716                 },
20717                 .alg_type = OP_ALG_ALGSEL_SHA512,
20718 -               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20719         }, {
20720                 .name = "md5",
20721                 .driver_name = "md5-caam",
20722 @@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_hash[] = {
20723                         },
20724                 },
20725                 .alg_type = OP_ALG_ALGSEL_MD5,
20726 -               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
20727         },
20728  };
20729  
20730  struct caam_hash_alg {
20731         struct list_head entry;
20732         int alg_type;
20733 -       int alg_op;
20734         struct ahash_alg ahash_alg;
20735  };
20736  
20737 @@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
20738                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20739                                          HASH_MSG_LEN + 64,
20740                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20741 +       dma_addr_t dma_addr;
20742  
20743         /*
20744          * Get a Job ring from Job Ring driver to ensure in-order
20745 @@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
20746                 pr_err("Job Ring Device allocation for transform failed\n");
20747                 return PTR_ERR(ctx->jrdev);
20748         }
20749 +
20750 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
20751 +                                       offsetof(struct caam_hash_ctx,
20752 +                                                sh_desc_update_dma),
20753 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20754 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
20755 +               dev_err(ctx->jrdev, "unable to map shared descriptors\n");
20756 +               caam_jr_free(ctx->jrdev);
20757 +               return -ENOMEM;
20758 +       }
20759 +
20760 +       ctx->sh_desc_update_dma = dma_addr;
20761 +       ctx->sh_desc_update_first_dma = dma_addr +
20762 +                                       offsetof(struct caam_hash_ctx,
20763 +                                                sh_desc_update_first);
20764 +       ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
20765 +                                                  sh_desc_fin);
20766 +       ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
20767 +                                                     sh_desc_digest);
20768 +
20769         /* copy descriptor header template value */
20770 -       ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20771 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
20772 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20773  
20774 -       ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20775 +       ctx->ctx_len = runninglen[(ctx->adata.algtype &
20776 +                                  OP_ALG_ALGSEL_SUBMASK) >>
20777                                   OP_ALG_ALGSEL_SHIFT];
20778  
20779         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20780 @@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20781  {
20782         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20783  
20784 -       if (ctx->sh_desc_update_dma &&
20785 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
20786 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
20787 -                                desc_bytes(ctx->sh_desc_update),
20788 -                                DMA_TO_DEVICE);
20789 -       if (ctx->sh_desc_update_first_dma &&
20790 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
20791 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
20792 -                                desc_bytes(ctx->sh_desc_update_first),
20793 -                                DMA_TO_DEVICE);
20794 -       if (ctx->sh_desc_fin_dma &&
20795 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
20796 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
20797 -                                desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
20798 -       if (ctx->sh_desc_digest_dma &&
20799 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
20800 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
20801 -                                desc_bytes(ctx->sh_desc_digest),
20802 -                                DMA_TO_DEVICE);
20803 -       if (ctx->sh_desc_finup_dma &&
20804 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
20805 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
20806 -                                desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
20807 -
20808 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
20809 +                              offsetof(struct caam_hash_ctx,
20810 +                                       sh_desc_update_dma),
20811 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20812         caam_jr_free(ctx->jrdev);
20813  }
20814  
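The two hunks above trade five per-descriptor DMA mappings (and the matching unmap ladder in cra_exit) for a single mapping that covers every shared descriptor at once. This works only because the descriptors sit contiguously at the front of struct caam_hash_ctx, so mapping offsetof(struct caam_hash_ctx, sh_desc_update_dma) bytes from sh_desc_update spans all of them, and each sub-descriptor address is derived by adding that field's offset. A minimal sketch of the layout assumption (MAX_SH_DESC_WORDS is an illustrative name, not the driver's):

	struct caam_hash_ctx_sketch {
		/* one DMA_TO_DEVICE mapping covers exactly this span */
		u32 sh_desc_update[MAX_SH_DESC_WORDS];	     /* dma + 0 */
		u32 sh_desc_update_first[MAX_SH_DESC_WORDS]; /* dma + offsetof() */
		u32 sh_desc_fin[MAX_SH_DESC_WORDS];
		u32 sh_desc_digest[MAX_SH_DESC_WORDS];
		/* first unmapped field doubles as the end-of-span marker */
		dma_addr_t sh_desc_update_dma;
		/* ... remaining CPU-only state ... */
	};

DMA_ATTR_SKIP_CPU_SYNC skips the implicit cache maintenance at map and unmap time; the expectation is that each descriptor is synced for the device explicitly whenever it is rewritten.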
20815 @@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_template *template,
20816         alg->cra_type = &crypto_ahash_type;
20817  
20818         t_alg->alg_type = template->alg_type;
20819 -       t_alg->alg_op = template->alg_op;
20820  
20821         return t_alg;
20822  }
20823 diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
20824 index 354a16ab..4fcb378e 100644
20825 --- a/drivers/crypto/caam/caampkc.c
20826 +++ b/drivers/crypto/caam/caampkc.c
20827 @@ -18,6 +18,10 @@
20828  #define DESC_RSA_PUB_LEN       (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
20829  #define DESC_RSA_PRIV_F1_LEN   (2 * CAAM_CMD_SZ + \
20830                                  sizeof(struct rsa_priv_f1_pdb))
20831 +#define DESC_RSA_PRIV_F2_LEN   (2 * CAAM_CMD_SZ + \
20832 +                                sizeof(struct rsa_priv_f2_pdb))
20833 +#define DESC_RSA_PRIV_F3_LEN   (2 * CAAM_CMD_SZ + \
20834 +                                sizeof(struct rsa_priv_f3_pdb))
20835  
20836  static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
20837                          struct akcipher_request *req)
20838 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
20839         dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20840  }
20841  
20842 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
20843 +                             struct akcipher_request *req)
20844 +{
20845 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20846 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20847 +       struct caam_rsa_key *key = &ctx->key;
20848 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20849 +       size_t p_sz = key->p_sz;
20850 +       size_t q_sz = key->q_sz;
20851 +
20852 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20853 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20854 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20855 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20856 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20857 +}
20858 +
20859 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
20860 +                             struct akcipher_request *req)
20861 +{
20862 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20863 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20864 +       struct caam_rsa_key *key = &ctx->key;
20865 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20866 +       size_t p_sz = key->p_sz;
20867 +       size_t q_sz = key->q_sz;
20868 +
20869 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20870 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20871 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
20872 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
20873 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
20874 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20875 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20876 +}
20877 +
20878  /* RSA Job Completion handler */
20879  static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
20880  {
20881 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
20882         akcipher_request_complete(req, err);
20883  }
20884  
20885 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
20886 +                            void *context)
20887 +{
20888 +       struct akcipher_request *req = context;
20889 +       struct rsa_edesc *edesc;
20890 +
20891 +       if (err)
20892 +               caam_jr_strstatus(dev, err);
20893 +
20894 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20895 +
20896 +       rsa_priv_f2_unmap(dev, edesc, req);
20897 +       rsa_io_unmap(dev, edesc, req);
20898 +       kfree(edesc);
20899 +
20900 +       akcipher_request_complete(req, err);
20901 +}
20902 +
20903 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
20904 +                            void *context)
20905 +{
20906 +       struct akcipher_request *req = context;
20907 +       struct rsa_edesc *edesc;
20908 +
20909 +       if (err)
20910 +               caam_jr_strstatus(dev, err);
20911 +
20912 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20913 +
20914 +       rsa_priv_f3_unmap(dev, edesc, req);
20915 +       rsa_io_unmap(dev, edesc, req);
20916 +       kfree(edesc);
20917 +
20918 +       akcipher_request_complete(req, err);
20919 +}
20920 +
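Each of the three done callbacks recovers its rsa_edesc from the bare descriptor pointer the job ring hands back. Because hw_desc[] is the flexible tail of struct rsa_edesc, container_of() on its first element is equivalent to manual offsetof() arithmetic, with type checking thrown in; a sketch of the two spellings:

	/* what the driver does: */
	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	/* the equivalent (and less safe) pointer arithmetic: */
	edesc = (struct rsa_edesc *)((char *)desc -
				     offsetof(struct rsa_edesc, hw_desc));

container_of() makes the compiler warn if desc does not have the member's pointer type, which is exactly why the caamrng.c hunk further down converts rng_done() from the arithmetic form to container_of().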
20921  static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20922                                          size_t desclen)
20923  {
20924 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20925         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20926         struct device *dev = ctx->dev;
20927         struct rsa_edesc *edesc;
20928 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20929 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20930 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20931 +                      GFP_KERNEL : GFP_ATOMIC;
20932         int sgc;
20933         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
20934         int src_nents, dst_nents;
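The narrowed gfp test above is deliberate: CRYPTO_TFM_REQ_MAY_BACKLOG only grants permission to queue the request when the engine is busy and says nothing about the caller's context, so sleepability now hinges on CRYPTO_TFM_REQ_MAY_SLEEP alone. In sketch form:

	/* old: (MAY_BACKLOG | MAY_SLEEP) chose GFP_KERNEL - too permissive */
	/* new: only MAY_SLEEP does; everything else allocates atomically  */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;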
20935 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
20936         return 0;
20937  }
20938  
20939 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
20940 +                              struct rsa_edesc *edesc)
20941 +{
20942 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20943 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20944 +       struct caam_rsa_key *key = &ctx->key;
20945 +       struct device *dev = ctx->dev;
20946 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20947 +       int sec4_sg_index = 0;
20948 +       size_t p_sz = key->p_sz;
20949 +       size_t q_sz = key->q_sz;
20950 +
20951 +       pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
20952 +       if (dma_mapping_error(dev, pdb->d_dma)) {
20953 +               dev_err(dev, "Unable to map RSA private exponent memory\n");
20954 +               return -ENOMEM;
20955 +       }
20956 +
20957 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20958 +       if (dma_mapping_error(dev, pdb->p_dma)) {
20959 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
20960 +               goto unmap_d;
20961 +       }
20962 +
20963 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20964 +       if (dma_mapping_error(dev, pdb->q_dma)) {
20965 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
20966 +               goto unmap_p;
20967 +       }
20968 +
20969 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
20970 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
20971 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
20972 +               goto unmap_q;
20973 +       }
20974 +
20975 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
20976 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
20977 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
20978 +               goto unmap_tmp1;
20979 +       }
20980 +
20981 +       if (edesc->src_nents > 1) {
20982 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
20983 +               pdb->g_dma = edesc->sec4_sg_dma;
20984 +               sec4_sg_index += edesc->src_nents;
20985 +       } else {
20986 +               pdb->g_dma = sg_dma_address(req->src);
20987 +       }
20988 +
20989 +       if (edesc->dst_nents > 1) {
20990 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
20991 +               pdb->f_dma = edesc->sec4_sg_dma +
20992 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
20993 +       } else {
20994 +               pdb->f_dma = sg_dma_address(req->dst);
20995 +       }
20996 +
20997 +       pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
20998 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
20999 +
21000 +       return 0;
21001 +
21002 +unmap_tmp1:
21003 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21004 +unmap_q:
21005 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21006 +unmap_p:
21007 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21008 +unmap_d:
21009 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
21010 +
21011 +       return -ENOMEM;
21012 +}
21013 +
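set_rsa_priv_f2_pdb() above and set_rsa_priv_f3_pdb() below both use the standard kernel unwind ladder: every successful dma_map_single() adds one label's worth of cleanup, and the labels run in exactly reverse acquisition order, so a failure at step N unmaps steps N-1 down to 1 and nothing else. Reduced to a skeleton with hypothetical resources a, b, c:

	static int setup_sketch(void)
	{
		if (get_a())
			return -ENOMEM;	/* nothing held yet */
		if (get_b())
			goto err_a;	/* only a is held */
		if (get_c())
			goto err_b;	/* a and b are held */
		return 0;

	err_b:
		put_b();
	err_a:
		put_a();
		return -ENOMEM;
	}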
21014 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
21015 +                              struct rsa_edesc *edesc)
21016 +{
21017 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21018 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21019 +       struct caam_rsa_key *key = &ctx->key;
21020 +       struct device *dev = ctx->dev;
21021 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
21022 +       int sec4_sg_index = 0;
21023 +       size_t p_sz = key->p_sz;
21024 +       size_t q_sz = key->q_sz;
21025 +
21026 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
21027 +       if (dma_mapping_error(dev, pdb->p_dma)) {
21028 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
21029 +               return -ENOMEM;
21030 +       }
21031 +
21032 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
21033 +       if (dma_mapping_error(dev, pdb->q_dma)) {
21034 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
21035 +               goto unmap_p;
21036 +       }
21037 +
21038 +       pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
21039 +       if (dma_mapping_error(dev, pdb->dp_dma)) {
21040 +               dev_err(dev, "Unable to map RSA exponent dp memory\n");
21041 +               goto unmap_q;
21042 +       }
21043 +
21044 +       pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
21045 +       if (dma_mapping_error(dev, pdb->dq_dma)) {
21046 +               dev_err(dev, "Unable to map RSA exponent dq memory\n");
21047 +               goto unmap_dp;
21048 +       }
21049 +
21050 +       pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
21051 +       if (dma_mapping_error(dev, pdb->c_dma)) {
21052 +               dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
21053 +               goto unmap_dq;
21054 +       }
21055 +
21056 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
21057 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
21058 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
21059 +               goto unmap_qinv;
21060 +       }
21061 +
21062 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
21063 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
21064 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
21065 +               goto unmap_tmp1;
21066 +       }
21067 +
21068 +       if (edesc->src_nents > 1) {
21069 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
21070 +               pdb->g_dma = edesc->sec4_sg_dma;
21071 +               sec4_sg_index += edesc->src_nents;
21072 +       } else {
21073 +               pdb->g_dma = sg_dma_address(req->src);
21074 +       }
21075 +
21076 +       if (edesc->dst_nents > 1) {
21077 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
21078 +               pdb->f_dma = edesc->sec4_sg_dma +
21079 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
21080 +       } else {
21081 +               pdb->f_dma = sg_dma_address(req->dst);
21082 +       }
21083 +
21084 +       pdb->sgf |= key->n_sz;
21085 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
21086 +
21087 +       return 0;
21088 +
21089 +unmap_tmp1:
21090 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21091 +unmap_qinv:
21092 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
21093 +unmap_dq:
21094 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
21095 +unmap_dp:
21096 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
21097 +unmap_q:
21098 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21099 +unmap_p:
21100 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21101 +
21102 +       return -ENOMEM;
21103 +}
21104 +
21105  static int caam_rsa_enc(struct akcipher_request *req)
21106  {
21107         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21108 @@ -301,24 +543,14 @@ static int caam_rsa_enc(struct akcipher_request *req)
21109         return ret;
21110  }
21111  
21112 -static int caam_rsa_dec(struct akcipher_request *req)
21113 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
21114  {
21115         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21116         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21117 -       struct caam_rsa_key *key = &ctx->key;
21118         struct device *jrdev = ctx->dev;
21119         struct rsa_edesc *edesc;
21120         int ret;
21121  
21122 -       if (unlikely(!key->n || !key->d))
21123 -               return -EINVAL;
21124 -
21125 -       if (req->dst_len < key->n_sz) {
21126 -               req->dst_len = key->n_sz;
21127 -               dev_err(jrdev, "Output buffer length less than parameter n\n");
21128 -               return -EOVERFLOW;
21129 -       }
21130 -
21131         /* Allocate extended descriptor */
21132         edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
21133         if (IS_ERR(edesc))
21134 @@ -344,17 +576,147 @@ static int caam_rsa_dec(struct akcipher_request *req)
21135         return ret;
21136  }
21137  
21138 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
21139 +{
21140 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21141 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21142 +       struct device *jrdev = ctx->dev;
21143 +       struct rsa_edesc *edesc;
21144 +       int ret;
21145 +
21146 +       /* Allocate extended descriptor */
21147 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
21148 +       if (IS_ERR(edesc))
21149 +               return PTR_ERR(edesc);
21150 +
21151 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
21152 +       ret = set_rsa_priv_f2_pdb(req, edesc);
21153 +       if (ret)
21154 +               goto init_fail;
21155 +
21156 +       /* Initialize Job Descriptor */
21157 +       init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
21158 +
21159 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
21160 +       if (!ret)
21161 +               return -EINPROGRESS;
21162 +
21163 +       rsa_priv_f2_unmap(jrdev, edesc, req);
21164 +
21165 +init_fail:
21166 +       rsa_io_unmap(jrdev, edesc, req);
21167 +       kfree(edesc);
21168 +       return ret;
21169 +}
21170 +
21171 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
21172 +{
21173 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21174 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21175 +       struct device *jrdev = ctx->dev;
21176 +       struct rsa_edesc *edesc;
21177 +       int ret;
21178 +
21179 +       /* Allocate extended descriptor */
21180 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
21181 +       if (IS_ERR(edesc))
21182 +               return PTR_ERR(edesc);
21183 +
21184 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
21185 +       ret = set_rsa_priv_f3_pdb(req, edesc);
21186 +       if (ret)
21187 +               goto init_fail;
21188 +
21189 +       /* Initialize Job Descriptor */
21190 +       init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
21191 +
21192 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
21193 +       if (!ret)
21194 +               return -EINPROGRESS;
21195 +
21196 +       rsa_priv_f3_unmap(jrdev, edesc, req);
21197 +
21198 +init_fail:
21199 +       rsa_io_unmap(jrdev, edesc, req);
21200 +       kfree(edesc);
21201 +       return ret;
21202 +}
21203 +
21204 +static int caam_rsa_dec(struct akcipher_request *req)
21205 +{
21206 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21207 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21208 +       struct caam_rsa_key *key = &ctx->key;
21209 +       int ret;
21210 +
21211 +       if (unlikely(!key->n || !key->d))
21212 +               return -EINVAL;
21213 +
21214 +       if (req->dst_len < key->n_sz) {
21215 +               req->dst_len = key->n_sz;
21216 +               dev_err(ctx->dev, "Output buffer length less than parameter n\n");
21217 +               return -EOVERFLOW;
21218 +       }
21219 +
21220 +       if (key->priv_form == FORM3)
21221 +               ret = caam_rsa_dec_priv_f3(req);
21222 +       else if (key->priv_form == FORM2)
21223 +               ret = caam_rsa_dec_priv_f2(req);
21224 +       else
21225 +               ret = caam_rsa_dec_priv_f1(req);
21226 +
21227 +       return ret;
21228 +}
21229 +
21230  static void caam_rsa_free_key(struct caam_rsa_key *key)
21231  {
21232         kzfree(key->d);
21233 +       kzfree(key->p);
21234 +       kzfree(key->q);
21235 +       kzfree(key->dp);
21236 +       kzfree(key->dq);
21237 +       kzfree(key->qinv);
21238 +       kzfree(key->tmp1);
21239 +       kzfree(key->tmp2);
21240         kfree(key->e);
21241         kfree(key->n);
21242 -       key->d = NULL;
21243 -       key->e = NULL;
21244 -       key->n = NULL;
21245 -       key->d_sz = 0;
21246 -       key->e_sz = 0;
21247 -       key->n_sz = 0;
21248 +       memset(key, 0, sizeof(*key));
21249 +}
21250 +
21251 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
21252 +{
21253 +       while (*nbytes && !**ptr) {
21254 +               (*ptr)++;
21255 +               (*nbytes)--;
21256 +       }
21257 +}
21258 +
21259 +/**
21260 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
21261 + * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
21262 + * prime factor, since BER encoding uses the minimum number of bytes needed
21263 + * to encode an integer. The decoded values therefore have to be zero-padded
21264 + * back to the appropriate length.
21265 + *
21266 + * @ptr   : pointer to {dP, dQ, qInv} CRT member
21267 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
21268 + * @dstlen: length in bytes of corresponding p or q prime factor
21269 + */
21270 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
21271 +{
21272 +       u8 *dst;
21273 +
21274 +       caam_rsa_drop_leading_zeros(&ptr, &nbytes);
21275 +       if (!nbytes)
21276 +               return NULL;
21277 +
21278 +       dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
21279 +       if (!dst)
21280 +               return NULL;
21281 +
21282 +       memcpy(dst + (dstlen - nbytes), ptr, nbytes);
21283 +
21284 +       return dst;
21285  }
21286  
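Note the contrast with caam_read_raw_data() below: that helper shrinks the buffer to the stripped length, while caam_read_rsa_crt() re-pads on the left so the value occupies exactly the p or q width the protocol data block expects. A worked example, assuming p_sz = 128 and a dP that BER-decoded to 127 bytes:

	/* dstlen = 128, nbytes = 127 after caam_rsa_drop_leading_zeros() */
	u8 *dst = kzalloc(128, GFP_DMA | GFP_KERNEL);	/* dst[0] stays 0x00 */
	memcpy(dst + (128 - 127), dp, 127);		/* dP fills dst[1..127] */
	/* CAAM reads a fixed-width 128-byte big-endian integer, so the
	   restored leading zero recovers the width without changing the value */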
21287  /**
21288 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
21289  {
21290         u8 *val;
21291  
21292 -       while (!*buf && *nbytes) {
21293 -               buf++;
21294 -               (*nbytes)--;
21295 -       }
21296 +       caam_rsa_drop_leading_zeros(&buf, nbytes);
21297 +       if (!*nbytes)
21298 +               return NULL;
21299  
21300         val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
21301         if (!val)
21302 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
21303                                 unsigned int keylen)
21304  {
21305         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21306 -       struct rsa_key raw_key = {0};
21307 +       struct rsa_key raw_key = {NULL};
21308         struct caam_rsa_key *rsa_key = &ctx->key;
21309         int ret;
21310  
21311 @@ -437,11 +798,69 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
21312         return -ENOMEM;
21313  }
21314  
21315 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
21316 +                                      struct rsa_key *raw_key)
21317 +{
21318 +       struct caam_rsa_key *rsa_key = &ctx->key;
21319 +       size_t p_sz = raw_key->p_sz;
21320 +       size_t q_sz = raw_key->q_sz;
21321 +
21322 +       rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
21323 +       if (!rsa_key->p)
21324 +               return;
21325 +       rsa_key->p_sz = p_sz;
21326 +
21327 +       rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
21328 +       if (!rsa_key->q)
21329 +               goto free_p;
21330 +       rsa_key->q_sz = q_sz;
21331 +
21332 +       rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
21333 +       if (!rsa_key->tmp1)
21334 +               goto free_q;
21335 +
21336 +       rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
21337 +       if (!rsa_key->tmp2)
21338 +               goto free_tmp1;
21339 +
21340 +       rsa_key->priv_form = FORM2;
21341 +
21342 +       rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
21343 +       if (!rsa_key->dp)
21344 +               goto free_tmp2;
21345 +
21346 +       rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
21347 +       if (!rsa_key->dq)
21348 +               goto free_dp;
21349 +
21350 +       rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
21351 +                                         q_sz);
21352 +       if (!rsa_key->qinv)
21353 +               goto free_dq;
21354 +
21355 +       rsa_key->priv_form = FORM3;
21356 +
21357 +       return;
21358 +
21359 +free_dq:
21360 +       kzfree(rsa_key->dq);
21361 +free_dp:
21362 +       kzfree(rsa_key->dp);
21363 +free_tmp2:
21364 +       kzfree(rsa_key->tmp2);
21365 +free_tmp1:
21366 +       kzfree(rsa_key->tmp1);
21367 +free_q:
21368 +       kzfree(rsa_key->q);
21369 +free_p:
21370 +       kzfree(rsa_key->p);
21371 +}
21372 +
21373  static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21374                                  unsigned int keylen)
21375  {
21376         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21377 -       struct rsa_key raw_key = {0};
21378 +       struct rsa_key raw_key = {NULL};
21379         struct caam_rsa_key *rsa_key = &ctx->key;
21380         int ret;
21381  
21382 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21383         memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
21384         memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
21385  
21386 +       caam_rsa_set_priv_key_form(ctx, &raw_key);
21387 +
21388         return 0;
21389  
21390  err:
21391 diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
21392 index f595d159..87ab75e9 100644
21393 --- a/drivers/crypto/caam/caampkc.h
21394 +++ b/drivers/crypto/caam/caampkc.h
21395 @@ -12,22 +12,76 @@
21396  #include "compat.h"
21397  #include "pdb.h"
21398  
21399 +/**
21400 + * caam_priv_key_form - CAAM RSA private key representation
21401 + * A CAAM RSA private key may take any of three forms.
21402 + *
21403 + * 1. The first representation consists of the pair (n, d), where the
21404 + *    components have the following meanings:
21405 + *        n      the RSA modulus
21406 + *        d      the RSA private exponent
21407 + *
21408 + * 2. The second representation consists of the triplet (p, q, d), where the
21409 + *    components have the following meanings:
21410 + *        p      the first prime factor of the RSA modulus n
21411 + *        q      the second prime factor of the RSA modulus n
21412 + *        d      the RSA private exponent
21413 + *
21414 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
21415 + *    where the components have the following meanings:
21416 + *        p      the first prime factor of the RSA modulus n
21417 + *        q      the second prime factor of the RSA modulus n
21418 + *        dP     the first factor's CRT exponent
21419 + *        dQ     the second factor's CRT exponent
21420 + *        qInv   the (first) CRT coefficient
21421 + *
21422 + * The benefit of using the second or third key form is the lower computational
21423 + * cost of the decryption and signature operations.
21424 + */
21425 +enum caam_priv_key_form {
21426 +       FORM1,
21427 +       FORM2,
21428 +       FORM3
21429 +};
21430 +
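The lower cost of forms 2 and 3 is the standard RSA-CRT argument, which the comment above assumes rather than spells out. With form 3 the device computes two half-width exponentiations and recombines them (Garner's formula):

	m1 = c^dP mod p
	m2 = c^dQ mod q
	h  = qInv * (m1 - m2) mod p
	m  = m2 + q * h

Since modular exponentiation cost grows roughly with the cube of the operand width, two exponentiations at half width cost about 2 * (1/2)^3 = 1/4 of a single full-width c^d mod n.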
21431  /**
21432   * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
21433   * @n           : RSA modulus raw byte stream
21434   * @e           : RSA public exponent raw byte stream
21435   * @d           : RSA private exponent raw byte stream
21436 + * @p           : RSA prime factor p of RSA modulus n
21437 + * @q           : RSA prime factor q of RSA modulus n
21438 + * @dp          : RSA CRT exponent of p
21439 + * @dq          : RSA CRT exponent of q
21440 + * @qinv        : RSA CRT coefficient
21441 + * @tmp1        : CAAM uses this temporary buffer as an internal state buffer.
21442 + *                It is assumed to be as long as p.
21443 + * @tmp2        : CAAM uses this temporary buffer as an internal state buffer.
21444 + *                It is assumed to be as long as q.
21445   * @n_sz        : length in bytes of RSA modulus n
21446   * @e_sz        : length in bytes of RSA public exponent
21447   * @d_sz        : length in bytes of RSA private exponent
21448 + * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
21449 + * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
21450 + * @priv_form   : CAAM RSA private key representation
21451   */
21452  struct caam_rsa_key {
21453         u8 *n;
21454         u8 *e;
21455         u8 *d;
21456 +       u8 *p;
21457 +       u8 *q;
21458 +       u8 *dp;
21459 +       u8 *dq;
21460 +       u8 *qinv;
21461 +       u8 *tmp1;
21462 +       u8 *tmp2;
21463         size_t n_sz;
21464         size_t e_sz;
21465         size_t d_sz;
21466 +       size_t p_sz;
21467 +       size_t q_sz;
21468 +       enum caam_priv_key_form priv_form;
21469  };
21470  
21471  /**
21472 @@ -59,6 +113,8 @@ struct rsa_edesc {
21473         union {
21474                 struct rsa_pub_pdb pub;
21475                 struct rsa_priv_f1_pdb priv_f1;
21476 +               struct rsa_priv_f2_pdb priv_f2;
21477 +               struct rsa_priv_f3_pdb priv_f3;
21478         } pdb;
21479         u32 hw_desc[];
21480  };
21481 @@ -66,5 +122,7 @@ struct rsa_edesc {
21482  /* Descriptor construction primitives. */
21483  void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
21484  void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
21485 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
21486 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
21487  
21488  #endif
21489 diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
21490 index 9b92af2c..fde07d4f 100644
21491 --- a/drivers/crypto/caam/caamrng.c
21492 +++ b/drivers/crypto/caam/caamrng.c
21493 @@ -52,7 +52,7 @@
21494  
21495  /* length of descriptors */
21496  #define DESC_JOB_O_LEN                 (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
21497 -#define DESC_RNG_LEN                   (4 * CAAM_CMD_SZ)
21498 +#define DESC_RNG_LEN                   (3 * CAAM_CMD_SZ)
21499  
21500  /* Buffer, its dma address and lock */
21501  struct buf_data {
21502 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
21503  {
21504         struct buf_data *bd;
21505  
21506 -       bd = (struct buf_data *)((char *)desc -
21507 -             offsetof(struct buf_data, hw_desc));
21508 +       bd = container_of(desc, struct buf_data, hw_desc[0]);
21509  
21510         if (err)
21511                 caam_jr_strstatus(jrdev, err);
21512 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
21513  
21514         init_sh_desc(desc, HDR_SHARE_SERIAL);
21515  
21516 -       /* Propagate errors from shared to job descriptor */
21517 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21518 -
21519         /* Generate random bytes */
21520         append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
21521  
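This deletion is what the DESC_RNG_LEN change at the top of the file accounts for: with the SET_OK_NO_PROP_ERRORS load gone, the shared descriptor is down to three commands, one CAAM_CMD_SZ each. The store command lives in the unchanged remainder of this function; its form below is quoted from the mainline driver rather than from this hunk:

	init_sh_desc(desc, HDR_SHARE_SERIAL);		/* 1: header      */
	append_operation(desc, OP_ALG_ALGSEL_RNG |
			       OP_TYPE_CLASS1_ALG);	/* 2: generate    */
	append_seq_fifo_store(desc, RN_BUF_SIZE,
			      FIFOST_TYPE_RNGSTORE);	/* 3: store bytes */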
21522 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
21523         if (err)
21524                 return err;
21525  
21526 -       err = caam_init_buf(ctx, 1);
21527 -       if (err)
21528 -               return err;
21529 -
21530 -       return 0;
21531 +       return caam_init_buf(ctx, 1);
21532  }
21533  
21534  static struct hwrng caam_rng = {
21535 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
21536                 pr_err("Job Ring Device allocation for transform failed\n");
21537                 return PTR_ERR(dev);
21538         }
21539 -       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
21540 +       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
21541         if (!rng_ctx) {
21542                 err = -ENOMEM;
21543                 goto free_caam_alloc;
21544 diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
21545 index 7149cd24..4e084f51 100644
21546 --- a/drivers/crypto/caam/compat.h
21547 +++ b/drivers/crypto/caam/compat.h
21548 @@ -16,6 +16,7 @@
21549  #include <linux/of_platform.h>
21550  #include <linux/dma-mapping.h>
21551  #include <linux/io.h>
21552 +#include <linux/iommu.h>
21553  #include <linux/spinlock.h>
21554  #include <linux/rtnetlink.h>
21555  #include <linux/in.h>
21556 diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
21557 index 98468b96..8f9642c6 100644
21558 --- a/drivers/crypto/caam/ctrl.c
21559 +++ b/drivers/crypto/caam/ctrl.c
21560 @@ -2,40 +2,41 @@
21561   * Controller-level driver, kernel property detection, initialization
21562   *
21563   * Copyright 2008-2012 Freescale Semiconductor, Inc.
21564 + * Copyright 2017 NXP
21565   */
21566  
21567  #include <linux/device.h>
21568  #include <linux/of_address.h>
21569  #include <linux/of_irq.h>
21570 +#include <linux/sys_soc.h>
21571  
21572  #include "compat.h"
21573  #include "regs.h"
21574  #include "intern.h"
21575  #include "jr.h"
21576  #include "desc_constr.h"
21577 -#include "error.h"
21578  #include "ctrl.h"
21579  
21580  bool caam_little_end;
21581  EXPORT_SYMBOL(caam_little_end);
21582 +bool caam_imx;
21583 +EXPORT_SYMBOL(caam_imx);
21584 +bool caam_dpaa2;
21585 +EXPORT_SYMBOL(caam_dpaa2);
21586 +
21587 +#ifdef CONFIG_CAAM_QI
21588 +#include "qi.h"
21589 +#endif
21590  
21591  /*
21592   * i.MX targets tend to have clock control subsystems that can
21593   * enable/disable clocking to our device.
21594   */
21595 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
21596 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
21597 -                                               char *clk_name)
21598 -{
21599 -       return devm_clk_get(dev, clk_name);
21600 -}
21601 -#else
21602  static inline struct clk *caam_drv_identify_clk(struct device *dev,
21603                                                 char *clk_name)
21604  {
21605 -       return NULL;
21606 +       return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
21607  }
21608 -#endif
21609  
21610  /*
21611   * Descriptor to instantiate RNG State Handle 0 in normal mode and
21612 @@ -270,7 +271,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
21613                 /*
21614                  * If the corresponding bit is set, then it means the state
21615                  * handle was initialized by us, and thus it needs to be
21616 -                * deintialized as well
21617 +                * deinitialized as well
21618                  */
21619                 if ((1 << sh_idx) & state_handle_mask) {
21620                         /*
21621 @@ -303,20 +304,24 @@ static int caam_remove(struct platform_device *pdev)
21622         struct device *ctrldev;
21623         struct caam_drv_private *ctrlpriv;
21624         struct caam_ctrl __iomem *ctrl;
21625 -       int ring;
21626  
21627         ctrldev = &pdev->dev;
21628         ctrlpriv = dev_get_drvdata(ctrldev);
21629         ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
21630  
21631 -       /* Remove platform devices for JobRs */
21632 -       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
21633 -               if (ctrlpriv->jrpdev[ring])
21634 -                       of_device_unregister(ctrlpriv->jrpdev[ring]);
21635 -       }
21636 +       /* Remove platform devices under the crypto node */
21637 +       of_platform_depopulate(ctrldev);
21638 +
21639 +#ifdef CONFIG_CAAM_QI
21640 +       if (ctrlpriv->qidev)
21641 +               caam_qi_shutdown(ctrlpriv->qidev);
21642 +#endif
21643  
21644 -       /* De-initialize RNG state handles initialized by this driver. */
21645 -       if (ctrlpriv->rng4_sh_init)
21646 +       /*
21647 +        * De-initialize RNG state handles initialized by this driver.
21648 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
21649 +        */
21650 +       if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
21651                 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
21652  
21653         /* Shut down debug views */
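of_platform_depopulate() is the teardown half of a pairing completed in caam_probe() further down, where of_platform_populate() replaces the manual per-ring bookkeeping removed here (and is also why the caam_match table moves above the probe routine later in this patch: populate needs it). The pattern, sketched:

	/* probe: register a platform device for each child node of the
	 * controller's DT node - the job rings among them */
	ret = of_platform_populate(nprop, caam_match, NULL, dev);
	if (ret)
		goto iounmap_ctrl;

	/* remove: one call unregisters all of them - no jrpdev[] array
	 * of child device pointers to maintain */
	of_platform_depopulate(ctrldev);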
21654 @@ -331,8 +336,8 @@ static int caam_remove(struct platform_device *pdev)
21655         clk_disable_unprepare(ctrlpriv->caam_ipg);
21656         clk_disable_unprepare(ctrlpriv->caam_mem);
21657         clk_disable_unprepare(ctrlpriv->caam_aclk);
21658 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21659 -
21660 +       if (ctrlpriv->caam_emi_slow)
21661 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21662         return 0;
21663  }
21664  
21665 @@ -366,11 +371,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
21666          */
21667         val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
21668               >> RTSDCTL_ENT_DLY_SHIFT;
21669 -       if (ent_delay <= val) {
21670 -               /* put RNG4 into run mode */
21671 -               clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
21672 -               return;
21673 -       }
21674 +       if (ent_delay <= val)
21675 +               goto start_rng;
21676  
21677         val = rd_reg32(&r4tst->rtsdctl);
21678         val = (val & ~RTSDCTL_ENT_DLY_MASK) |
21679 @@ -382,15 +384,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
21680         wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
21681         /* read the control register */
21682         val = rd_reg32(&r4tst->rtmctl);
21683 +start_rng:
21684         /*
21685          * select raw sampling in both entropy shifter
21686 -        * and statistical checker
21688 +        * and statistical checker; put RNG4 into run mode
21688          */
21689 -       clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
21690 -       /* put RNG4 into run mode */
21691 -       clrsetbits_32(&val, RTMCTL_PRGM, 0);
21692 -       /* write back the control register */
21693 -       wr_reg32(&r4tst->rtmctl, val);
21694 +       clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
21695  }
21696  
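The rewritten tail of kick_trng() collapses what used to be a read, two bit updates on a local copy, and a write-back into one call. clrsetbits_32() is a read-modify-write helper over the CAAM register accessors, roughly (a sketch of the semantics, not the exact regs.h implementation):

	static inline void clrsetbits_32_sketch(void __iomem *reg,
						u32 clear, u32 set)
	{
		wr_reg32(reg, (rd_reg32(reg) & ~clear) | set);
	}

so the single call both selects raw sampling in the entropy shifter and statistical checker (setting RTMCTL_SAMP_MODE_RAW_ES_SC) and takes RNG4 out of program mode (clearing RTMCTL_PRGM).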
21697  /**
21698 @@ -411,28 +410,26 @@ int caam_get_era(void)
21699  }
21700  EXPORT_SYMBOL(caam_get_era);
21701  
21702 -#ifdef CONFIG_DEBUG_FS
21703 -static int caam_debugfs_u64_get(void *data, u64 *val)
21704 -{
21705 -       *val = caam64_to_cpu(*(u64 *)data);
21706 -       return 0;
21707 -}
21708 -
21709 -static int caam_debugfs_u32_get(void *data, u64 *val)
21710 -{
21711 -       *val = caam32_to_cpu(*(u32 *)data);
21712 -       return 0;
21713 -}
21714 -
21715 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
21716 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
21717 -#endif
21718 +static const struct of_device_id caam_match[] = {
21719 +       {
21720 +               .compatible = "fsl,sec-v4.0",
21721 +       },
21722 +       {
21723 +               .compatible = "fsl,sec4.0",
21724 +       },
21725 +       {},
21726 +};
21727 +MODULE_DEVICE_TABLE(of, caam_match);
21728  
21729  /* Probe routine for CAAM top (controller) level */
21730  static int caam_probe(struct platform_device *pdev)
21731  {
21732 -       int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21733 +       int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21734         u64 caam_id;
21735 +       static const struct soc_device_attribute imx_soc[] = {
21736 +               {.family = "Freescale i.MX"},
21737 +               {},
21738 +       };
21739         struct device *dev;
21740         struct device_node *nprop, *np;
21741         struct caam_ctrl __iomem *ctrl;
21742 @@ -452,9 +449,10 @@ static int caam_probe(struct platform_device *pdev)
21743  
21744         dev = &pdev->dev;
21745         dev_set_drvdata(dev, ctrlpriv);
21746 -       ctrlpriv->pdev = pdev;
21747         nprop = pdev->dev.of_node;
21748  
21749 +       caam_imx = (bool)soc_device_match(imx_soc);
21750 +
21751         /* Enable clocking */
21752         clk = caam_drv_identify_clk(&pdev->dev, "ipg");
21753         if (IS_ERR(clk)) {
21754 @@ -483,14 +481,16 @@ static int caam_probe(struct platform_device *pdev)
21755         }
21756         ctrlpriv->caam_aclk = clk;
21757  
21758 -       clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21759 -       if (IS_ERR(clk)) {
21760 -               ret = PTR_ERR(clk);
21761 -               dev_err(&pdev->dev,
21762 -                       "can't identify CAAM emi_slow clk: %d\n", ret);
21763 -               return ret;
21764 +       if (!of_machine_is_compatible("fsl,imx6ul")) {
21765 +               clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21766 +               if (IS_ERR(clk)) {
21767 +                       ret = PTR_ERR(clk);
21768 +                       dev_err(&pdev->dev,
21769 +                               "can't identify CAAM emi_slow clk: %d\n", ret);
21770 +                       return ret;
21771 +               }
21772 +               ctrlpriv->caam_emi_slow = clk;
21773         }
21774 -       ctrlpriv->caam_emi_slow = clk;
21775  
21776         ret = clk_prepare_enable(ctrlpriv->caam_ipg);
21777         if (ret < 0) {
21778 @@ -511,11 +511,13 @@ static int caam_probe(struct platform_device *pdev)
21779                 goto disable_caam_mem;
21780         }
21781  
21782 -       ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21783 -       if (ret < 0) {
21784 -               dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21785 -                       ret);
21786 -               goto disable_caam_aclk;
21787 +       if (ctrlpriv->caam_emi_slow) {
21788 +               ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21789 +               if (ret < 0) {
21790 +                       dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21791 +                               ret);
21792 +                       goto disable_caam_aclk;
21793 +               }
21794         }
21795  
21796         /* Get configuration properties from device tree */
21797 @@ -542,13 +544,13 @@ static int caam_probe(struct platform_device *pdev)
21798         else
21799                 BLOCK_OFFSET = PG_SIZE_64K;
21800  
21801 -       ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
21802 -       ctrlpriv->assure = (struct caam_assurance __force *)
21803 -                          ((uint8_t *)ctrl +
21804 +       ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
21805 +       ctrlpriv->assure = (struct caam_assurance __iomem __force *)
21806 +                          ((__force uint8_t *)ctrl +
21807                             BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
21808                            );
21809 -       ctrlpriv->deco = (struct caam_deco __force *)
21810 -                        ((uint8_t *)ctrl +
21811 +       ctrlpriv->deco = (struct caam_deco __iomem __force *)
21812 +                        ((__force uint8_t *)ctrl +
21813                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
21814                          );
21815  
21816 @@ -557,12 +559,17 @@ static int caam_probe(struct platform_device *pdev)
21817  
21818         /*
21819          * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
21820 -        * long pointers in master configuration register
21821 +        * long pointers in master configuration register.
21822 +        * In case of DPAA 2.x, Management Complex firmware performs
21823 +        * the configuration.
21824          */
21825 -       clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21826 -                     MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21827 -                     MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21828 -                     (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
21829 +       caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
21830 +       if (!caam_dpaa2)
21831 +               clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21832 +                             MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21833 +                             MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21834 +                             (sizeof(dma_addr_t) == sizeof(u64) ?
21835 +                              MCFGR_LONG_PTR : 0));
21836  
21837         /*
21838          *  Read the Compile Time paramters and SCFGR to determine
21839 @@ -590,64 +597,67 @@ static int caam_probe(struct platform_device *pdev)
21840                               JRSTART_JR1_START | JRSTART_JR2_START |
21841                               JRSTART_JR3_START);
21842  
21843 -       if (sizeof(dma_addr_t) == sizeof(u64))
21844 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21845 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21846 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
21847 +               if (caam_dpaa2)
21848 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21849 +               else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21850 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21851                 else
21852 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21853 -       else
21854 -               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21855 -
21856 -       /*
21857 -        * Detect and enable JobRs
21858 -        * First, find out how many ring spec'ed, allocate references
21859 -        * for all, then go probe each one.
21860 -        */
21861 -       rspec = 0;
21862 -       for_each_available_child_of_node(nprop, np)
21863 -               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21864 -                   of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
21865 -                       rspec++;
21866 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21867 +       } else {
21868 +               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21869 +       }
21870 +       if (ret) {
21871 +               dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
21872 +               goto iounmap_ctrl;
21873 +       }
21874  
21875 -       ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
21876 -                                       sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
21877 -       if (ctrlpriv->jrpdev == NULL) {
21878 -               ret = -ENOMEM;
21879 +       ret = of_platform_populate(nprop, caam_match, NULL, dev);
21880 +       if (ret) {
21881 +               dev_err(dev, "JR platform devices creation error\n");
21882                 goto iounmap_ctrl;
21883         }
21884  
21885 +#ifdef CONFIG_DEBUG_FS
21886 +       /*
21887 +        * FIXME: needs better naming distinction, as some amalgamation of
21888 +        * "caam" and nprop->full_name. The OF name isn't distinctive,
21889 +        * but does separate instances
21890 +        */
21891 +       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21892 +
21893 +       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21894 +       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21895 +#endif
21896         ring = 0;
21897 -       ctrlpriv->total_jobrs = 0;
21898         for_each_available_child_of_node(nprop, np)
21899                 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21900                     of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
21901 -                       ctrlpriv->jrpdev[ring] =
21902 -                               of_platform_device_create(np, NULL, dev);
21903 -                       if (!ctrlpriv->jrpdev[ring]) {
21904 -                               pr_warn("JR%d Platform device creation error\n",
21905 -                                       ring);
21906 -                               continue;
21907 -                       }
21908 -                       ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
21909 -                                            ((uint8_t *)ctrl +
21910 +                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
21911 +                                            ((__force uint8_t *)ctrl +
21912                                              (ring + JR_BLOCK_NUMBER) *
21913                                               BLOCK_OFFSET
21914                                              );
21915                         ctrlpriv->total_jobrs++;
21916                         ring++;
21917 -       }
21918 +               }
21919  
21920 -       /* Check to see if QI present. If so, enable */
21921 -       ctrlpriv->qi_present =
21922 -                       !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
21923 -                          CTPR_MS_QI_MASK);
21924 -       if (ctrlpriv->qi_present) {
21925 -               ctrlpriv->qi = (struct caam_queue_if __force *)
21926 -                              ((uint8_t *)ctrl +
21927 +       /* Check to see if (DPAA 1.x) QI present. If so, enable */
21928 +       ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
21929 +       if (ctrlpriv->qi_present && !caam_dpaa2) {
21930 +               ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
21931 +                              ((__force uint8_t *)ctrl +
21932                                  BLOCK_OFFSET * QI_BLOCK_NUMBER
21933                                );
21934                 /* This is all that's required to physically enable QI */
21935                 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
21936 +
21937 +               /* If QMAN driver is present, init CAAM-QI backend */
21938 +#ifdef CONFIG_CAAM_QI
21939 +               ret = caam_qi_init(pdev);
21940 +               if (ret)
21941 +                       dev_err(dev, "caam qi i/f init failed: %d\n", ret);
21942 +#endif
21943         }
21944  
21945         /* If no QI and no rings specified, quit and go home */
21946 @@ -662,8 +672,10 @@ static int caam_probe(struct platform_device *pdev)
21947         /*
21948          * If SEC has RNG version >= 4 and RNG state handle has not been
21949          * already instantiated, do RNG instantiation
21950 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
21951          */
21952 -       if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21953 +       if (!caam_dpaa2 &&
21954 +           (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21955                 ctrlpriv->rng4_sh_init =
21956                         rd_reg32(&ctrl->r4tst[0].rdsta);
21957                 /*
21958 @@ -731,77 +743,46 @@ static int caam_probe(struct platform_device *pdev)
21959         /* Report "alive" for developer to see */
21960         dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
21961                  caam_get_era());
21962 -       dev_info(dev, "job rings = %d, qi = %d\n",
21963 -                ctrlpriv->total_jobrs, ctrlpriv->qi_present);
21964 +       dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
21965 +                ctrlpriv->total_jobrs, ctrlpriv->qi_present,
21966 +                caam_dpaa2 ? "yes" : "no");
21967  
21968  #ifdef CONFIG_DEBUG_FS
21969 -       /*
21970 -        * FIXME: needs better naming distinction, as some amalgamation of
21971 -        * "caam" and nprop->full_name. The OF name isn't distinctive,
21972 -        * but does separate instances
21973 -        */
21974 -       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21975 -
21976 -       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21977 -       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21978 -
21979 -       /* Controller-level - performance monitor counters */
21980 -
21981 -       ctrlpriv->ctl_rq_dequeued =
21982 -               debugfs_create_file("rq_dequeued",
21983 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21984 -                                   ctrlpriv->ctl, &perfmon->req_dequeued,
21985 -                                   &caam_fops_u64_ro);
21986 -       ctrlpriv->ctl_ob_enc_req =
21987 -               debugfs_create_file("ob_rq_encrypted",
21988 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21989 -                                   ctrlpriv->ctl, &perfmon->ob_enc_req,
21990 -                                   &caam_fops_u64_ro);
21991 -       ctrlpriv->ctl_ib_dec_req =
21992 -               debugfs_create_file("ib_rq_decrypted",
21993 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21994 -                                   ctrlpriv->ctl, &perfmon->ib_dec_req,
21995 -                                   &caam_fops_u64_ro);
21996 -       ctrlpriv->ctl_ob_enc_bytes =
21997 -               debugfs_create_file("ob_bytes_encrypted",
21998 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21999 -                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes,
22000 -                                   &caam_fops_u64_ro);
22001 -       ctrlpriv->ctl_ob_prot_bytes =
22002 -               debugfs_create_file("ob_bytes_protected",
22003 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22004 -                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes,
22005 -                                   &caam_fops_u64_ro);
22006 -       ctrlpriv->ctl_ib_dec_bytes =
22007 -               debugfs_create_file("ib_bytes_decrypted",
22008 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22009 -                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes,
22010 -                                   &caam_fops_u64_ro);
22011 -       ctrlpriv->ctl_ib_valid_bytes =
22012 -               debugfs_create_file("ib_bytes_validated",
22013 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22014 -                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes,
22015 -                                   &caam_fops_u64_ro);
22016 +       debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
22017 +                           ctrlpriv->ctl, &perfmon->req_dequeued,
22018 +                           &caam_fops_u64_ro);
22019 +       debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
22020 +                           ctrlpriv->ctl, &perfmon->ob_enc_req,
22021 +                           &caam_fops_u64_ro);
22022 +       debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
22023 +                           ctrlpriv->ctl, &perfmon->ib_dec_req,
22024 +                           &caam_fops_u64_ro);
22025 +       debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
22026 +                           ctrlpriv->ctl, &perfmon->ob_enc_bytes,
22027 +                           &caam_fops_u64_ro);
22028 +       debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
22029 +                           ctrlpriv->ctl, &perfmon->ob_prot_bytes,
22030 +                           &caam_fops_u64_ro);
22031 +       debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
22032 +                           ctrlpriv->ctl, &perfmon->ib_dec_bytes,
22033 +                           &caam_fops_u64_ro);
22034 +       debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
22035 +                           ctrlpriv->ctl, &perfmon->ib_valid_bytes,
22036 +                           &caam_fops_u64_ro);
22037  
22038         /* Controller level - global status values */
22039 -       ctrlpriv->ctl_faultaddr =
22040 -               debugfs_create_file("fault_addr",
22041 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22042 -                                   ctrlpriv->ctl, &perfmon->faultaddr,
22043 -                                   &caam_fops_u32_ro);
22044 -       ctrlpriv->ctl_faultdetail =
22045 -               debugfs_create_file("fault_detail",
22046 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22047 -                                   ctrlpriv->ctl, &perfmon->faultdetail,
22048 -                                   &caam_fops_u32_ro);
22049 -       ctrlpriv->ctl_faultstatus =
22050 -               debugfs_create_file("fault_status",
22051 -                                   S_IRUSR | S_IRGRP | S_IROTH,
22052 -                                   ctrlpriv->ctl, &perfmon->status,
22053 -                                   &caam_fops_u32_ro);
22054 +       debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
22055 +                           ctrlpriv->ctl, &perfmon->faultaddr,
22056 +                           &caam_fops_u32_ro);
22057 +       debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
22058 +                           ctrlpriv->ctl, &perfmon->faultdetail,
22059 +                           &caam_fops_u32_ro);
22060 +       debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
22061 +                           ctrlpriv->ctl, &perfmon->status,
22062 +                           &caam_fops_u32_ro);
22063  
22064         /* Internal covering keys (useful in non-secure mode only) */
22065 -       ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
22066 +       ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
22067         ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22068         ctrlpriv->ctl_kek = debugfs_create_blob("kek",
22069                                                 S_IRUSR |
22070 @@ -809,7 +790,7 @@ static int caam_probe(struct platform_device *pdev)
22071                                                 ctrlpriv->ctl,
22072                                                 &ctrlpriv->ctl_kek_wrap);
22073  
22074 -       ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
22075 +       ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
22076         ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22077         ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
22078                                                  S_IRUSR |
22079 @@ -817,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
22080                                                  ctrlpriv->ctl,
22081                                                  &ctrlpriv->ctl_tkek_wrap);
22082  
22083 -       ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
22084 +       ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
22085         ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22086         ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
22087                                                  S_IRUSR |
22088 @@ -828,13 +809,17 @@ static int caam_probe(struct platform_device *pdev)
22089         return 0;
22090  
22091  caam_remove:
22092 +#ifdef CONFIG_DEBUG_FS
22093 +       debugfs_remove_recursive(ctrlpriv->dfs_root);
22094 +#endif
22095         caam_remove(pdev);
22096         return ret;
22097  
22098  iounmap_ctrl:
22099         iounmap(ctrl);
22100  disable_caam_emi_slow:
22101 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22102 +       if (ctrlpriv->caam_emi_slow)
22103 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22104  disable_caam_aclk:
22105         clk_disable_unprepare(ctrlpriv->caam_aclk);
22106  disable_caam_mem:
22107 @@ -844,17 +829,6 @@ static int caam_probe(struct platform_device *pdev)
22108         return ret;
22109  }
22110  
22111 -static struct of_device_id caam_match[] = {
22112 -       {
22113 -               .compatible = "fsl,sec-v4.0",
22114 -       },
22115 -       {
22116 -               .compatible = "fsl,sec4.0",
22117 -       },
22118 -       {},
22119 -};
22120 -MODULE_DEVICE_TABLE(of, caam_match);
22121 -
22122  static struct platform_driver caam_driver = {
22123         .driver = {
22124                 .name = "caam",
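The unwind path above only calls clk_disable_unprepare() on caam_emi_slow when the clock was actually obtained, since that clock does not exist on every SoC this driver probes. A minimal sketch of the same optional-clock pattern, assuming the convention that a missing clock is recorded as a NULL handle (the function and clock names are illustrative, not taken from the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Acquire an optional clock: map -ENOENT to a NULL handle so the
 * error/remove paths can simply test the pointer before disabling it.
 */
static int example_enable_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "emi_slow");

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -ENOENT)
			return PTR_ERR(clk);
		*out = NULL;		/* clock not wired on this SoC */
		return 0;
	}

	*out = clk;
	return clk_prepare_enable(clk);
}

static void example_disable_optional_clk(struct clk *clk)
{
	if (clk)			/* mirrors the guarded unwind above */
		clk_disable_unprepare(clk);
}

Keeping the handle NULL when the clock is absent makes every error label safe to fall through, which is what the guarded disable_caam_emi_slow label in caam_probe() relies on.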
22125 diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
22126 index cac5402a..7e7bf68c 100644
22127 --- a/drivers/crypto/caam/ctrl.h
22128 +++ b/drivers/crypto/caam/ctrl.h
22129 @@ -10,4 +10,6 @@
22130  /* Prototypes for backend-level services exposed to APIs */
22131  int caam_get_era(void);
22132  
22133 +extern bool caam_dpaa2;
22134 +
22135  #endif /* CTRL_H */
22136 diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
22137 index 513b6646..a8c3be73 100644
22138 --- a/drivers/crypto/caam/desc.h
22139 +++ b/drivers/crypto/caam/desc.h
22140 @@ -22,12 +22,6 @@
22141  #define SEC4_SG_LEN_MASK       0x3fffffff      /* Excludes EXT and FINAL */
22142  #define SEC4_SG_OFFSET_MASK    0x00001fff
22143  
22144 -struct sec4_sg_entry {
22145 -       u64 ptr;
22146 -       u32 len;
22147 -       u32 bpid_offset;
22148 -};
22149 -
22150  /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
22151  #define MAX_CAAM_DESCSIZE      64
22152  
22153 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
22154  #define CMD_SEQ_LOAD           (0x03 << CMD_SHIFT)
22155  #define CMD_FIFO_LOAD          (0x04 << CMD_SHIFT)
22156  #define CMD_SEQ_FIFO_LOAD      (0x05 << CMD_SHIFT)
22157 +#define CMD_MOVEB              (0x07 << CMD_SHIFT)
22158  #define CMD_STORE              (0x0a << CMD_SHIFT)
22159  #define CMD_SEQ_STORE          (0x0b << CMD_SHIFT)
22160  #define CMD_FIFO_STORE         (0x0c << CMD_SHIFT)
22161 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
22162  #define HDR_ZRO                        0x00008000
22163  
22164  /* Start Index or SharedDesc Length */
22165 -#define HDR_START_IDX_MASK     0x3f
22166  #define HDR_START_IDX_SHIFT    16
22167 +#define HDR_START_IDX_MASK     (0x3f << HDR_START_IDX_SHIFT)
22168  
22169  /* If shared descriptor header, 6-bit length */
22170  #define HDR_DESCLEN_SHR_MASK   0x3f
22171 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
22172  #define HDR_PROP_DNR           0x00000800
22173  
22174  /* JobDesc/SharedDesc share property */
22175 -#define HDR_SD_SHARE_MASK      0x03
22176  #define HDR_SD_SHARE_SHIFT     8
22177 -#define HDR_JD_SHARE_MASK      0x07
22178 +#define HDR_SD_SHARE_MASK      (0x03 << HDR_SD_SHARE_SHIFT)
22179  #define HDR_JD_SHARE_SHIFT     8
22180 +#define HDR_JD_SHARE_MASK      (0x07 << HDR_JD_SHARE_SHIFT)
22181  
22182  #define HDR_SHARE_NEVER                (0x00 << HDR_SD_SHARE_SHIFT)
22183  #define HDR_SHARE_WAIT         (0x01 << HDR_SD_SHARE_SHIFT)
22184 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
22185  #define LDST_SRCDST_WORD_DECO_MATH2    (0x0a << LDST_SRCDST_SHIFT)
22186  #define LDST_SRCDST_WORD_DECO_AAD_SZ   (0x0b << LDST_SRCDST_SHIFT)
22187  #define LDST_SRCDST_WORD_DECO_MATH3    (0x0b << LDST_SRCDST_SHIFT)
22188 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
22189 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ  (0x0c << LDST_SRCDST_SHIFT)
22190  #define LDST_SRCDST_WORD_ALTDS_CLASS1  (0x0f << LDST_SRCDST_SHIFT)
22191  #define LDST_SRCDST_WORD_PKHA_A_SZ     (0x10 << LDST_SRCDST_SHIFT)
22192  #define LDST_SRCDST_WORD_PKHA_B_SZ     (0x11 << LDST_SRCDST_SHIFT)
22193 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
22194  #define FIFOLD_TYPE_PK_N       (0x08 << FIFOLD_TYPE_SHIFT)
22195  #define FIFOLD_TYPE_PK_A       (0x0c << FIFOLD_TYPE_SHIFT)
22196  #define FIFOLD_TYPE_PK_B       (0x0d << FIFOLD_TYPE_SHIFT)
22197 +#define FIFOLD_TYPE_IFIFO      (0x0f << FIFOLD_TYPE_SHIFT)
22198  
22199  /* Other types. Need to OR in last/flush bits as desired */
22200  #define FIFOLD_TYPE_MSG_MASK   (0x38 << FIFOLD_TYPE_SHIFT)
22201 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
22202  #define FIFOST_TYPE_PKHA_N      (0x08 << FIFOST_TYPE_SHIFT)
22203  #define FIFOST_TYPE_PKHA_A      (0x0c << FIFOST_TYPE_SHIFT)
22204  #define FIFOST_TYPE_PKHA_B      (0x0d << FIFOST_TYPE_SHIFT)
22205 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
22206 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
22207  #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
22208  #define FIFOST_TYPE_PKHA_E_JKEK         (0x22 << FIFOST_TYPE_SHIFT)
22209  #define FIFOST_TYPE_PKHA_E_TKEK         (0x23 << FIFOST_TYPE_SHIFT)
22210 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
22211  #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
22212  #define FIFOST_TYPE_RNGSTORE    (0x34 << FIFOST_TYPE_SHIFT)
22213  #define FIFOST_TYPE_RNGFIFO     (0x35 << FIFOST_TYPE_SHIFT)
22214 +#define FIFOST_TYPE_METADATA    (0x3e << FIFOST_TYPE_SHIFT)
22215  #define FIFOST_TYPE_SKIP        (0x3f << FIFOST_TYPE_SHIFT)
22216  
22217  /*
22218 @@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
22219  /* For non-protocol/alg-only op commands */
22220  #define OP_ALG_TYPE_SHIFT      24
22221  #define OP_ALG_TYPE_MASK       (0x7 << OP_ALG_TYPE_SHIFT)
22222 -#define OP_ALG_TYPE_CLASS1     2
22223 -#define OP_ALG_TYPE_CLASS2     4
22224 +#define OP_ALG_TYPE_CLASS1     (2 << OP_ALG_TYPE_SHIFT)
22225 +#define OP_ALG_TYPE_CLASS2     (4 << OP_ALG_TYPE_SHIFT)
22226  
22227  #define OP_ALG_ALGSEL_SHIFT    16
22228  #define OP_ALG_ALGSEL_MASK     (0xff << OP_ALG_ALGSEL_SHIFT)
22229 @@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
22230  #define OP_ALG_PKMODE_MOD_PRIMALITY    0x00f
22231  
22232  /* PKHA mode copy-memory functions */
22233 -#define OP_ALG_PKMODE_SRC_REG_SHIFT    13
22234 +#define OP_ALG_PKMODE_SRC_REG_SHIFT    17
22235  #define OP_ALG_PKMODE_SRC_REG_MASK     (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
22236  #define OP_ALG_PKMODE_DST_REG_SHIFT    10
22237  #define OP_ALG_PKMODE_DST_REG_MASK     (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
22238 @@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
22239  #define MATH_SRC1_REG2         (0x02 << MATH_SRC1_SHIFT)
22240  #define MATH_SRC1_REG3         (0x03 << MATH_SRC1_SHIFT)
22241  #define MATH_SRC1_IMM          (0x04 << MATH_SRC1_SHIFT)
22242 -#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC0_SHIFT)
22243 +#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC1_SHIFT)
22244  #define MATH_SRC1_INFIFO       (0x0a << MATH_SRC1_SHIFT)
22245  #define MATH_SRC1_OUTFIFO      (0x0b << MATH_SRC1_SHIFT)
22246  #define MATH_SRC1_ONE          (0x0c << MATH_SRC1_SHIFT)
22247 +#define MATH_SRC1_ZERO         (0x0f << MATH_SRC1_SHIFT)
22248  
22249  /* Destination selectors */
22250  #define MATH_DEST_SHIFT                8
22251 @@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
22252  /* Frame Descriptor Command for Replacement Job Descriptor */
22253  #define FD_CMD_REPLACE_JOB_DESC                                0x20000000
22254  
22255 +/* CHA Control Register bits */
22256 +#define CCTRL_RESET_CHA_ALL          0x1
22257 +#define CCTRL_RESET_CHA_AESA         0x2
22258 +#define CCTRL_RESET_CHA_DESA         0x4
22259 +#define CCTRL_RESET_CHA_AFHA         0x8
22260 +#define CCTRL_RESET_CHA_KFHA         0x10
22261 +#define CCTRL_RESET_CHA_SF8A         0x20
22262 +#define CCTRL_RESET_CHA_PKHA         0x40
22263 +#define CCTRL_RESET_CHA_MDHA         0x80
22264 +#define CCTRL_RESET_CHA_CRCA         0x100
22265 +#define CCTRL_RESET_CHA_RNG          0x200
22266 +#define CCTRL_RESET_CHA_SF9A         0x400
22267 +#define CCTRL_RESET_CHA_ZUCE         0x800
22268 +#define CCTRL_RESET_CHA_ZUCA         0x1000
22269 +#define CCTRL_UNLOAD_PK_A0           0x10000
22270 +#define CCTRL_UNLOAD_PK_A1           0x20000
22271 +#define CCTRL_UNLOAD_PK_A2           0x40000
22272 +#define CCTRL_UNLOAD_PK_A3           0x80000
22273 +#define CCTRL_UNLOAD_PK_B0           0x100000
22274 +#define CCTRL_UNLOAD_PK_B1           0x200000
22275 +#define CCTRL_UNLOAD_PK_B2           0x400000
22276 +#define CCTRL_UNLOAD_PK_B3           0x800000
22277 +#define CCTRL_UNLOAD_PK_N            0x1000000
22278 +#define CCTRL_UNLOAD_PK_A            0x4000000
22279 +#define CCTRL_UNLOAD_PK_B            0x8000000
22280 +#define CCTRL_UNLOAD_SBOX            0x10000000
22281 +
22282  #endif /* DESC_H */
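Several hunks above move mask macros from value position into shifted position (HDR_START_IDX_MASK, HDR_SD_SHARE_MASK, HDR_JD_SHARE_MASK, OP_ALG_TYPE_CLASS1/CLASS2), so a field can be isolated and compared directly against a descriptor word. A short sketch of the intended usage, assuming desc.h is included; the helper names are illustrative:

#include <linux/types.h>
#include "desc.h"

/* True when an OPERATION command selects a class 2 (authentication) engine;
 * this compares without re-shifting only because OP_ALG_TYPE_CLASS2 is now
 * defined in shifted position.
 */
static inline bool op_is_class2(u32 op)
{
	return (op & OP_ALG_TYPE_MASK) == OP_ALG_TYPE_CLASS2;
}

/* Extract the start index field from a descriptor header word. */
static inline unsigned int hdr_start_idx(u32 hdr)
{
	return (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;
}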
22283 diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
22284 index a8cd8a78..2d9dbeca 100644
22285 --- a/drivers/crypto/caam/desc_constr.h
22286 +++ b/drivers/crypto/caam/desc_constr.h
22287 @@ -4,6 +4,9 @@
22288   * Copyright 2008-2012 Freescale Semiconductor, Inc.
22289   */
22290  
22291 +#ifndef DESC_CONSTR_H
22292 +#define DESC_CONSTR_H
22293 +
22294  #include "desc.h"
22295  #include "regs.h"
22296  
22297 @@ -33,38 +36,39 @@
22298  
22299  extern bool caam_little_end;
22300  
22301 -static inline int desc_len(u32 *desc)
22302 +static inline int desc_len(u32 * const desc)
22303  {
22304         return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
22305  }
22306  
22307 -static inline int desc_bytes(void *desc)
22308 +static inline int desc_bytes(void * const desc)
22309  {
22310         return desc_len(desc) * CAAM_CMD_SZ;
22311  }
22312  
22313 -static inline u32 *desc_end(u32 *desc)
22314 +static inline u32 *desc_end(u32 * const desc)
22315  {
22316         return desc + desc_len(desc);
22317  }
22318  
22319 -static inline void *sh_desc_pdb(u32 *desc)
22320 +static inline void *sh_desc_pdb(u32 * const desc)
22321  {
22322         return desc + 1;
22323  }
22324  
22325 -static inline void init_desc(u32 *desc, u32 options)
22326 +static inline void init_desc(u32 * const desc, u32 options)
22327  {
22328         *desc = cpu_to_caam32((options | HDR_ONE) + 1);
22329  }
22330  
22331 -static inline void init_sh_desc(u32 *desc, u32 options)
22332 +static inline void init_sh_desc(u32 * const desc, u32 options)
22333  {
22334         PRINT_POS;
22335         init_desc(desc, CMD_SHARED_DESC_HDR | options);
22336  }
22337  
22338 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22339 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
22340 +                                   size_t pdb_bytes)
22341  {
22342         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22343  
22344 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22345                      options);
22346  }
22347  
22348 -static inline void init_job_desc(u32 *desc, u32 options)
22349 +static inline void init_job_desc(u32 * const desc, u32 options)
22350  {
22351         init_desc(desc, CMD_DESC_HDR | options);
22352  }
22353  
22354 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22355 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
22356 +                                    size_t pdb_bytes)
22357  {
22358         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22359  
22360         init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
22361  }
22362  
22363 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22364 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
22365  {
22366         dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
22367  
22368 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22369                                 CAAM_PTR_SZ / CAAM_CMD_SZ);
22370  }
22371  
22372 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22373 -                                       u32 options)
22374 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
22375 +                                       int len, u32 options)
22376  {
22377         PRINT_POS;
22378         init_job_desc(desc, HDR_SHARED | options |
22379 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22380         append_ptr(desc, ptr);
22381  }
22382  
22383 -static inline void append_data(u32 *desc, void *data, int len)
22384 +static inline void append_data(u32 * const desc, void *data, int len)
22385  {
22386         u32 *offset = desc_end(desc);
22387  
22388 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc, void *data, int len)
22389                                 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
22390  }
22391  
22392 -static inline void append_cmd(u32 *desc, u32 command)
22393 +static inline void append_cmd(u32 * const desc, u32 command)
22394  {
22395         u32 *cmd = desc_end(desc);
22396  
22397 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc, u32 command)
22398  
22399  #define append_u32 append_cmd
22400  
22401 -static inline void append_u64(u32 *desc, u64 data)
22402 +static inline void append_u64(u32 * const desc, u64 data)
22403  {
22404         u32 *offset = desc_end(desc);
22405  
22406 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc, u64 data)
22407  }
22408  
22409  /* Write command without affecting header, and return pointer to next word */
22410 -static inline u32 *write_cmd(u32 *desc, u32 command)
22411 +static inline u32 *write_cmd(u32 * const desc, u32 command)
22412  {
22413         *desc = cpu_to_caam32(command);
22414  
22415         return desc + 1;
22416  }
22417  
22418 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22419 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
22420                                   u32 command)
22421  {
22422         append_cmd(desc, command | len);
22423 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22424  }
22425  
22426  /* Write length after pointer, rather than inside command */
22427 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22428 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
22429                                          unsigned int len, u32 command)
22430  {
22431         append_cmd(desc, command);
22432 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22433         append_cmd(desc, len);
22434  }
22435  
22436 -static inline void append_cmd_data(u32 *desc, void *data, int len,
22437 +static inline void append_cmd_data(u32 * const desc, void *data, int len,
22438                                    u32 command)
22439  {
22440         append_cmd(desc, command | IMMEDIATE | len);
22441 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
22442  }
22443  
22444  #define APPEND_CMD_RET(cmd, op) \
22445 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
22446 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
22447  { \
22448         u32 *cmd = desc_end(desc); \
22449         PRINT_POS; \
22450 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
22451  }
22452  APPEND_CMD_RET(jump, JUMP)
22453  APPEND_CMD_RET(move, MOVE)
22454 +APPEND_CMD_RET(moveb, MOVEB)
22455  
22456 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
22457 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
22458  {
22459         *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
22460                                   (desc_len(desc) - (jump_cmd - desc)));
22461  }
22462  
22463 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22464 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
22465  {
22466         u32 val = caam32_to_cpu(*move_cmd);
22467  
22468 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22469  }
22470  
22471  #define APPEND_CMD(cmd, op) \
22472 -static inline void append_##cmd(u32 *desc, u32 options) \
22473 +static inline void append_##cmd(u32 * const desc, u32 options) \
22474  { \
22475         PRINT_POS; \
22476         append_cmd(desc, CMD_##op | options); \
22477 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
22478  APPEND_CMD(operation, OPERATION)
22479  
22480  #define APPEND_CMD_LEN(cmd, op) \
22481 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
22482 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
22483 +                               u32 options) \
22484  { \
22485         PRINT_POS; \
22486         append_cmd(desc, CMD_##op | len | options); \
22487 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
22488  APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
22489  
22490  #define APPEND_CMD_PTR(cmd, op) \
22491 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
22492 -                               u32 options) \
22493 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22494 +                               unsigned int len, u32 options) \
22495  { \
22496         PRINT_POS; \
22497         append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
22498 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
22499  APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
22500  APPEND_CMD_PTR(fifo_store, FIFO_STORE)
22501  
22502 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22503 -                               u32 options)
22504 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
22505 +                               unsigned int len, u32 options)
22506  {
22507         u32 cmd_src;
22508  
22509 @@ -249,7 +256,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22510  }
22511  
22512  #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
22513 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
22514 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
22515 +                                                dma_addr_t ptr, \
22516                                                  unsigned int len, \
22517                                                  u32 options) \
22518  { \
22519 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
22520  APPEND_SEQ_PTR_INTLEN(out, OUT)
22521  
22522  #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
22523 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22524 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22525                                          unsigned int len, u32 options) \
22526  { \
22527         PRINT_POS; \
22528 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
22529  APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
22530  
22531  #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
22532 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
22533 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
22534                                          unsigned int len, u32 options) \
22535  { \
22536         PRINT_POS; \
22537 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
22538   * the size of its type
22539   */
22540  #define APPEND_CMD_PTR_LEN(cmd, op, type) \
22541 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
22542 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22543                                 type len, u32 options) \
22544  { \
22545         PRINT_POS; \
22546 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
22547   * from length of immediate data provided, e.g., split keys
22548   */
22549  #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
22550 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22551 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22552                                          unsigned int data_len, \
22553                                          unsigned int len, u32 options) \
22554  { \
22555 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22556  APPEND_CMD_PTR_TO_IMM2(key, KEY);
22557  
22558  #define APPEND_CMD_RAW_IMM(cmd, op, type) \
22559 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
22560 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
22561                                              u32 options) \
22562  { \
22563         PRINT_POS; \
22564 @@ -426,3 +434,66 @@ do { \
22565         APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
22566  #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
22567         APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
22568 +
22569 +/**
22570 + * struct alginfo - Container for algorithm details
22571 + * @algtype: algorithm selector; for valid values, see documentation of the
22572 + *           functions where it is used.
22573 + * @keylen: length of the provided algorithm key, in bytes
22574 + * @keylen_pad: padded length of the provided algorithm key, in bytes
22575 + * @key: address where the algorithm key resides: @key_virt (virtual
22576 + *       address) if key_inline is true, @key_dma (DMA/bus address) if false.
22577 + * @key_inline: true - key can be inlined in the descriptor; false - key is
22578 + *              referenced by the descriptor
22579 + */
22580 +struct alginfo {
22581 +       u32 algtype;
22582 +       unsigned int keylen;
22583 +       unsigned int keylen_pad;
22584 +       union {
22585 +               dma_addr_t key_dma;
22586 +               void *key_virt;
22587 +       };
22588 +       bool key_inline;
22589 +};
22590 +
22591 +/**
22592 + * desc_inline_query() - Provide indications on which data items can be inlined
22593 + *                       and which shall be referenced in a shared descriptor.
22594 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
22595 + *               excluding the data items to be inlined (or corresponding
22596 + *               pointer if an item is not inlined). Each cnstr_* function that
22597 + *               generates descriptors should have a define specifying the
22598 + *               corresponding length.
22599 + * @jd_len: Maximum length of the job descriptor(s) that will be used
22600 + *          together with the shared descriptor.
22601 + * @data_len: Array of lengths of the data items to be inlined
22602 + * @inl_mask: 32-bit mask with bit x = 1 if data item x can be inlined, 0
22603 + *            otherwise.
22604 + * @count: Number of data items (size of @data_len array); must be <= 32
22605 + *
22606 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
22607 + *         check @inl_mask for details.
22608 + */
22609 +static inline int desc_inline_query(unsigned int sd_base_len,
22610 +                                   unsigned int jd_len, unsigned int *data_len,
22611 +                                   u32 *inl_mask, unsigned int count)
22612 +{
22613 +       int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
22614 +       unsigned int i;
22615 +
22616 +       *inl_mask = 0;
22617 +       for (i = 0; (i < count) && (rem_bytes > 0); i++) {
22618 +               if (rem_bytes - (int)(data_len[i] +
22619 +                       (count - i - 1) * CAAM_PTR_SZ) >= 0) {
22620 +                       rem_bytes -= data_len[i];
22621 +                       *inl_mask |= (1 << i);
22622 +               } else {
22623 +                       rem_bytes -= CAAM_PTR_SZ;
22624 +               }
22625 +       }
22626 +
22627 +       return (rem_bytes >= 0) ? 0 : -1;
22628 +}
22629 +
22630 +#endif /* DESC_CONSTR_H */
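A sketch of how struct alginfo and desc_inline_query() fit together when deciding whether keys can live inside a shared descriptor, in the style of the caamalg callers. DESC_AEAD_BASE and DESC_JOB_IO_LEN are assumed to be base-length defines from the caam headers, and the key addresses are caller-provided:

static int example_pick_inline_keys(struct alginfo *adata,
				    struct alginfo *cdata,
				    void *akey_virt, dma_addr_t akey_dma)
{
	unsigned int data_len[2];
	u32 inl_mask;

	data_len[0] = adata->keylen_pad;
	data_len[1] = cdata->keylen;

	if (desc_inline_query(DESC_AEAD_BASE, DESC_JOB_IO_LEN,
			      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;	/* not even the references fit */

	/* Bit x of inl_mask reports whether data item x fits inline. */
	adata->key_inline = !!(inl_mask & 1);
	cdata->key_inline = !!(inl_mask & 2);

	if (adata->key_inline)
		adata->key_virt = akey_virt;
	else
		adata->key_dma = akey_dma;
	/* ... the cipher key is assigned the same way from inl_mask bit 1 */

	return 0;
}

Whichever items do not fit are referenced through their DMA address instead, which is why the query charges CAAM_PTR_SZ for every non-inlined item.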
22631 diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
22632 new file mode 100644
22633 index 00000000..410cd790
22634 --- /dev/null
22635 +++ b/drivers/crypto/caam/dpseci.c
22636 @@ -0,0 +1,859 @@
22637 +/*
22638 + * Copyright 2013-2016 Freescale Semiconductor Inc.
22639 + * Copyright 2017 NXP
22640 + *
22641 + * Redistribution and use in source and binary forms, with or without
22642 + * modification, are permitted provided that the following conditions are met:
22643 + *     * Redistributions of source code must retain the above copyright
22644 + *      notice, this list of conditions and the following disclaimer.
22645 + *     * Redistributions in binary form must reproduce the above copyright
22646 + *      notice, this list of conditions and the following disclaimer in the
22647 + *      documentation and/or other materials provided with the distribution.
22648 + *     * Neither the names of the above-listed copyright holders nor the
22649 + *      names of any contributors may be used to endorse or promote products
22650 + *      derived from this software without specific prior written permission.
22651 + *
22652 + *
22653 + * ALTERNATIVELY, this software may be distributed under the terms of the
22654 + * GNU General Public License ("GPL") as published by the Free Software
22655 + * Foundation, either version 2 of that License or (at your option) any
22656 + * later version.
22657 + *
22658 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22659 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22660 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22661 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22662 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22663 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22664 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22665 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22666 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22667 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22668 + * POSSIBILITY OF SUCH DAMAGE.
22669 + */
22670 +
22671 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
22672 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
22673 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
22674 +#include "dpseci.h"
22675 +#include "dpseci_cmd.h"
22676 +
22677 +/**
22678 + * dpseci_open() - Open a control session for the specified object
22679 + * @mc_io:     Pointer to MC portal's I/O object
22680 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22681 + * @dpseci_id: DPSECI unique ID
22682 + * @token:     Returned token; use in subsequent API calls
22683 + *
22684 + * This function can be used to open a control session for an already created
22685 + * object; an object may have been declared in the DPL or created by calling
22686 + * the dpseci_create() function.
22687 + * This function returns a unique authentication token, associated with the
22688 + * specific object ID and the specific MC portal; this token must be used in all
22689 + * subsequent commands for this specific object.
22690 + *
22691 + * Return:     '0' on success, error code otherwise
22692 + */
22693 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
22694 +               u16 *token)
22695 +{
22696 +       struct mc_command cmd = { 0 };
22697 +       struct dpseci_cmd_open *cmd_params;
22698 +       int err;
22699 +
22700 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
22701 +                                         cmd_flags,
22702 +                                         0);
22703 +       cmd_params = (struct dpseci_cmd_open *)cmd.params;
22704 +       cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
22705 +       err = mc_send_command(mc_io, &cmd);
22706 +       if (err)
22707 +               return err;
22708 +
22709 +       *token = mc_cmd_hdr_read_token(&cmd);
22710 +
22711 +       return 0;
22712 +}
22713 +
22714 +/**
22715 + * dpseci_close() - Close the control session of the object
22716 + * @mc_io:     Pointer to MC portal's I/O object
22717 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22718 + * @token:     Token of DPSECI object
22719 + *
22720 + * After this function is called, no further operations are allowed on the
22721 + * object without opening a new control session.
22722 + *
22723 + * Return:     '0' on success, error code otherwise
22724 + */
22725 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22726 +{
22727 +       struct mc_command cmd = { 0 };
22728 +
22729 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
22730 +                                         cmd_flags,
22731 +                                         token);
22732 +       return mc_send_command(mc_io, &cmd);
22733 +}
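/*
 * A minimal control-session sketch, assuming mc_io is a bound MC portal and
 * dpseci_id comes from the matched fsl_mc_device; the cmd_flags of 0 and the
 * function name are illustrative. Every call between open and close is
 * authenticated by the returned token.
 */
static int example_dpseci_session(struct fsl_mc_io *mc_io, int dpseci_id)
{
	u16 token;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_enable(mc_io, 0, token);	/* start accepting frames */

	dpseci_close(mc_io, 0, token);
	return err;
}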
22734 +
22735 +/**
22736 + * dpseci_create() - Create the DPSECI object
22737 + * @mc_io:     Pointer to MC portal's I/O object
22738 + * @dprc_token:        Parent container token; '0' for default container
22739 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22740 + * @cfg:       Configuration structure
22741 + * @obj_id:    returned object id
22742 + *
22743 + * Create the DPSECI object, allocate required resources and perform required
22744 + * initialization.
22745 + *
22746 + * The object can be created either by declaring it in the DPL file, or by
22747 + * calling this function.
22748 + *
22749 + * The function accepts an authentication token of a parent container that this
22750 + * object should be assigned to. The token can be '0', in which case the
22751 + * object is assigned to the default container.
22752 + * The newly created object can be opened with the returned object id and using
22753 + * the container's associated tokens and MC portals.
22754 + *
22755 + * Return:     '0' on success, error code otherwise
22756 + */
22757 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22758 +                 const struct dpseci_cfg *cfg, u32 *obj_id)
22759 +{
22760 +       struct mc_command cmd = { 0 };
22761 +       struct dpseci_cmd_create *cmd_params;
22762 +       int i, err;
22763 +
22764 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
22765 +                                         cmd_flags,
22766 +                                         dprc_token);
22767 +       cmd_params = (struct dpseci_cmd_create *)cmd.params;
22768 +       for (i = 0; i < 8; i++)
22769 +               cmd_params->priorities[i] = cfg->priorities[i];
22770 +       cmd_params->num_tx_queues = cfg->num_tx_queues;
22771 +       cmd_params->num_rx_queues = cfg->num_rx_queues;
22772 +       cmd_params->options = cpu_to_le32(cfg->options);
22773 +       err = mc_send_command(mc_io, &cmd);
22774 +       if (err)
22775 +               return err;
22776 +
22777 +       *obj_id = mc_cmd_read_object_id(&cmd);
22778 +
22779 +       return 0;
22780 +}
22781 +
22782 +/**
22783 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
22784 + * @mc_io:     Pointer to MC portal's I/O object
22785 + * @dprc_token: Parent container token; '0' for default container
22786 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22787 + * @object_id: The object id; it must be a valid id within the container that
22788 + *             created this object
22789 + *
22790 + * The function accepts the authentication token of the parent container that
22791 + * created the object (not the one that currently owns the object). The object
22792 + * is searched within parent using the provided 'object_id'.
22793 + * All tokens to the object must be closed before calling destroy.
22794 + *
22795 + * Return:     '0' on success, error code otherwise
22796 + */
22797 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22798 +                  u32 object_id)
22799 +{
22800 +       struct mc_command cmd = { 0 };
22801 +       struct dpseci_cmd_destroy *cmd_params;
22802 +
22803 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
22804 +                                         cmd_flags,
22805 +                                         dprc_token);
22806 +       cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
22807 +       cmd_params->object_id = cpu_to_le32(object_id);
22808 +
22809 +       return mc_send_command(mc_io, &cmd);
22810 +}
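/*
 * Object-lifecycle sketch, assuming the default parent container
 * (dprc_token 0); the queue counts and priorities are illustrative. The
 * returned obj_id is what dpseci_open() later takes as dpseci_id.
 */
static int example_dpseci_lifecycle(struct fsl_mc_io *mc_io)
{
	struct dpseci_cfg cfg = {
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },	/* remaining entries stay 0 */
	};
	u32 obj_id;
	int err;

	err = dpseci_create(mc_io, 0, 0, &cfg, &obj_id);
	if (err)
		return err;

	/* ... open a session on obj_id, use the object, close all tokens ... */

	return dpseci_destroy(mc_io, 0, 0, obj_id);
}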
22811 +
22812 +/**
22813 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
22814 + * @mc_io:     Pointer to MC portal's I/O object
22815 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22816 + * @token:     Token of DPSECI object
22817 + *
22818 + * Return:     '0' on success, error code otherwise
22819 + */
22820 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22821 +{
22822 +       struct mc_command cmd = { 0 };
22823 +
22824 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
22825 +                                         cmd_flags,
22826 +                                         token);
22827 +       return mc_send_command(mc_io, &cmd);
22828 +}
22829 +
22830 +/**
22831 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
22832 + * @mc_io:     Pointer to MC portal's I/O object
22833 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22834 + * @token:     Token of DPSECI object
22835 + *
22836 + * Return:     '0' on success, error code otherwise
22837 + */
22838 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22839 +{
22840 +       struct mc_command cmd = { 0 };
22841 +
22842 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
22843 +                                         cmd_flags,
22844 +                                         token);
22845 +
22846 +       return mc_send_command(mc_io, &cmd);
22847 +}
22848 +
22849 +/**
22850 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
22851 + * @mc_io:     Pointer to MC portal's I/O object
22852 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22853 + * @token:     Token of DPSECI object
22854 + * @en:                Returns '1' if object is enabled; '0' otherwise
22855 + *
22856 + * Return:     '0' on success, error code otherwise
22857 + */
22858 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22859 +                     int *en)
22860 +{
22861 +       struct mc_command cmd = { 0 };
22862 +       struct dpseci_rsp_is_enabled *rsp_params;
22863 +       int err;
22864 +
22865 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
22866 +                                         cmd_flags,
22867 +                                         token);
22868 +       err = mc_send_command(mc_io, &cmd);
22869 +       if (err)
22870 +               return err;
22871 +
22872 +       rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
22873 +       *en = le32_to_cpu(rsp_params->is_enabled);
22874 +
22875 +       return 0;
22876 +}
22877 +
22878 +/**
22879 + * dpseci_reset() - Reset the DPSECI, returning the object to its initial state
22880 + * @mc_io:     Pointer to MC portal's I/O object
22881 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22882 + * @token:     Token of DPSECI object
22883 + *
22884 + * Return:     '0' on success, error code otherwise
22885 + */
22886 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22887 +{
22888 +       struct mc_command cmd = { 0 };
22889 +
22890 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
22891 +                                         cmd_flags,
22892 +                                         token);
22893 +
22894 +       return mc_send_command(mc_io, &cmd);
22895 +}
22896 +
22897 +/**
22898 + * dpseci_get_irq_enable() - Get overall interrupt state
22899 + * @mc_io:     Pointer to MC portal's I/O object
22900 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22901 + * @token:     Token of DPSECI object
22902 + * @irq_index: The interrupt index to configure
22903 + * @en:                Returned interrupt state - enable = 1, disable = 0
22904 + *
22905 + * Return:     '0' on success, error code otherwise
22906 + */
22907 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22908 +                         u8 irq_index, u8 *en)
22909 +{
22910 +       struct mc_command cmd = { 0 };
22911 +       struct dpseci_cmd_irq_enable *cmd_params;
22912 +       struct dpseci_rsp_get_irq_enable *rsp_params;
22913 +       int err;
22914 +
22915 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
22916 +                                         cmd_flags,
22917 +                                         token);
22918 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22919 +       cmd_params->irq_index = irq_index;
22920 +       err = mc_send_command(mc_io, &cmd);
22921 +       if (err)
22922 +               return err;
22923 +
22924 +       rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
22925 +       *en = rsp_params->enable_state;
22926 +
22927 +       return 0;
22928 +}
22929 +
22930 +/**
22931 + * dpseci_set_irq_enable() - Set overall interrupt state.
22932 + * @mc_io:     Pointer to MC portal's I/O object
22933 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22934 + * @token:     Token of DPSECI object
22935 + * @irq_index: The interrupt index to configure
22936 + * @en:                Interrupt state - enable = 1, disable = 0
22937 + *
22938 + * Allows GPP software to control when interrupts are generated.
22939 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22940 + * overall interrupt state. If the interrupt is disabled, no cause will
22941 + * trigger an interrupt.
22942 + *
22943 + * Return:     '0' on success, error code otherwise
22944 + */
22945 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22946 +                         u8 irq_index, u8 en)
22947 +{
22948 +       struct mc_command cmd = { 0 };
22949 +       struct dpseci_cmd_irq_enable *cmd_params;
22950 +
22951 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
22952 +                                         cmd_flags,
22953 +                                         token);
22954 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22955 +       cmd_params->irq_index = irq_index;
22956 +       cmd_params->enable_state = en;
22957 +
22958 +       return mc_send_command(mc_io, &cmd);
22959 +}
22960 +
22961 +/**
22962 + * dpseci_get_irq_mask() - Get interrupt mask.
22963 + * @mc_io:     Pointer to MC portal's I/O object
22964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22965 + * @token:     Token of DPSECI object
22966 + * @irq_index: The interrupt index to configure
22967 + * @mask:      Returned event mask to trigger interrupt
22968 + *
22969 + * Every interrupt can have up to 32 causes and the interrupt model supports
22970 + * masking/unmasking each cause independently.
22971 + *
22972 + * Return:     '0' on success, error code otherwise
22973 + */
22974 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22975 +                       u8 irq_index, u32 *mask)
22976 +{
22977 +       struct mc_command cmd = { 0 };
22978 +       struct dpseci_cmd_irq_mask *cmd_params;
22979 +       int err;
22980 +
22981 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
22982 +                                         cmd_flags,
22983 +                                         token);
22984 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22985 +       cmd_params->irq_index = irq_index;
22986 +       err = mc_send_command(mc_io, &cmd);
22987 +       if (err)
22988 +               return err;
22989 +
22990 +       *mask = le32_to_cpu(cmd_params->mask);
22991 +
22992 +       return 0;
22993 +}
22994 +
22995 +/**
22996 + * dpseci_set_irq_mask() - Set interrupt mask.
22997 + * @mc_io:     Pointer to MC portal's I/O object
22998 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22999 + * @token:     Token of DPSECI object
23000 + * @irq_index: The interrupt index to configure
23001 + * @mask:      event mask to trigger interrupt;
23002 + *             each bit:
23003 + *                     0 = ignore event
23004 + *                     1 = consider event for asserting IRQ
23005 + *
23006 + * Every interrupt can have up to 32 causes and the interrupt model supports
23007 + * masking/unmasking each cause independently.
23008 + *
23009 + * Return:     '0' on success, error code otherwise
23010 + */
23011 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23012 +                       u8 irq_index, u32 mask)
23013 +{
23014 +       struct mc_command cmd = { 0 };
23015 +       struct dpseci_cmd_irq_mask *cmd_params;
23016 +
23017 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
23018 +                                         cmd_flags,
23019 +                                         token);
23020 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
23021 +       cmd_params->mask = cpu_to_le32(mask);
23022 +       cmd_params->irq_index = irq_index;
23023 +
23024 +       return mc_send_command(mc_io, &cmd);
23025 +}
23026 +
23027 +/**
23028 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
23029 + * @mc_io:     Pointer to MC portal's I/O object
23030 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23031 + * @token:     Token of DPSECI object
23032 + * @irq_index: The interrupt index to configure
23033 + * @status:    Returned interrupt status - one bit per cause:
23034 + *                     0 = no interrupt pending
23035 + *                     1 = interrupt pending
23036 + *
23037 + * Return:     '0' on success, error code otherwise
23038 + */
23039 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23040 +                         u8 irq_index, u32 *status)
23041 +{
23042 +       struct mc_command cmd = { 0 };
23043 +       struct dpseci_cmd_irq_status *cmd_params;
23044 +       int err;
23045 +
23046 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
23047 +                                         cmd_flags,
23048 +                                         token);
23049 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23050 +       cmd_params->status = cpu_to_le32(*status);
23051 +       cmd_params->irq_index = irq_index;
23052 +       err = mc_send_command(mc_io, &cmd);
23053 +       if (err)
23054 +               return err;
23055 +
23056 +       *status = le32_to_cpu(cmd_params->status);
23057 +
23058 +       return 0;
23059 +}
23060 +
23061 +/**
23062 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
23063 + * @mc_io:     Pointer to MC portal's I/O object
23064 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23065 + * @token:     Token of DPSECI object
23066 + * @irq_index: The interrupt index to configure
23067 + * @status:    bits to clear (W1C) - one bit per cause:
23068 + *                     0 = don't change
23069 + *                     1 = clear status bit
23070 + *
23071 + * Return:     '0' on success, error code otherwise
23072 + */
23073 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23074 +                           u8 irq_index, u32 status)
23075 +{
23076 +       struct mc_command cmd = { 0 };
23077 +       struct dpseci_cmd_irq_status *cmd_params;
23078 +
23079 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
23080 +                                         cmd_flags,
23081 +                                         token);
23082 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23083 +       cmd_params->status = cpu_to_le32(status);
23084 +       cmd_params->irq_index = irq_index;
23085 +
23086 +       return mc_send_command(mc_io, &cmd);
23087 +}
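/*
 * Interrupt-setup sketch for a single irq index: unmask the causes of
 * interest first, then enable the overall index; a handler would read the
 * pending bits with dpseci_get_irq_status() and acknowledge them (W1C) via
 * dpseci_clear_irq_status(). Index 0 and the all-causes mask are
 * illustrative.
 */
static int example_dpseci_irq_setup(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpseci_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	return dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
}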
23088 +
23089 +/**
23090 + * dpseci_get_attributes() - Retrieve DPSECI attributes
23091 + * @mc_io:     Pointer to MC portal's I/O object
23092 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23093 + * @token:     Token of DPSECI object
23094 + * @attr:      Returned object's attributes
23095 + *
23096 + * Return:     '0' on success, error code otherwise
23097 + */
23098 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23099 +                         struct dpseci_attr *attr)
23100 +{
23101 +       struct mc_command cmd = { 0 };
23102 +       struct dpseci_rsp_get_attributes *rsp_params;
23103 +       int err;
23104 +
23105 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
23106 +                                         cmd_flags,
23107 +                                         token);
23108 +       err = mc_send_command(mc_io, &cmd);
23109 +       if (err)
23110 +               return err;
23111 +
23112 +       rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
23113 +       attr->id = le32_to_cpu(rsp_params->id);
23114 +       attr->num_tx_queues = rsp_params->num_tx_queues;
23115 +       attr->num_rx_queues = rsp_params->num_rx_queues;
23116 +       attr->options = le32_to_cpu(rsp_params->options);
23117 +
23118 +       return 0;
23119 +}
23120 +
23121 +/**
23122 + * dpseci_set_rx_queue() - Set Rx queue configuration
23123 + * @mc_io:     Pointer to MC portal's I/O object
23124 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23125 + * @token:     Token of DPSECI object
23126 + * @queue:     Select the queue relative to the number of priorities configured at
23127 + *             DPSECI creation; use DPSECI_ALL_QUEUES to configure all
23128 + *             Rx queues identically.
23129 + * @cfg:       Rx queue configuration
23130 + *
23131 + * Return:     '0' on success, error code otherwise
23132 + */
23133 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23134 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg)
23135 +{
23136 +       struct mc_command cmd = { 0 };
23137 +       struct dpseci_cmd_queue *cmd_params;
23138 +
23139 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
23140 +                                         cmd_flags,
23141 +                                         token);
23142 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23143 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23144 +       cmd_params->priority = cfg->dest_cfg.priority;
23145 +       cmd_params->queue = queue;
23146 +       cmd_params->dest_type = cfg->dest_cfg.dest_type;
23147 +       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
23148 +       cmd_params->options = cpu_to_le32(cfg->options);
23149 +       cmd_params->order_preservation_en =
23150 +               cpu_to_le32(cfg->order_preservation_en);
23151 +
23152 +       return mc_send_command(mc_io, &cmd);
23153 +}
23154 +
23155 +/**
23156 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
23157 + * @mc_io:     Pointer to MC portal's I/O object
23158 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23159 + * @token:     Token of DPSECI object
23160 + * @queue:     Select the queue relative to the number of priorities configured at
23161 + *             DPSECI creation
23162 + * @attr:      Returned Rx queue attributes
23163 + *
23164 + * Return:     '0' on success, error code otherwise
23165 + */
23166 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23167 +                       u8 queue, struct dpseci_rx_queue_attr *attr)
23168 +{
23169 +       struct mc_command cmd = { 0 };
23170 +       struct dpseci_cmd_queue *cmd_params;
23171 +       int err;
23172 +
23173 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
23174 +                                         cmd_flags,
23175 +                                         token);
23176 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23177 +       cmd_params->queue = queue;
23178 +       err = mc_send_command(mc_io, &cmd);
23179 +       if (err)
23180 +               return err;
23181 +
23182 +       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
23183 +       attr->dest_cfg.priority = cmd_params->priority;
23184 +       attr->dest_cfg.dest_type = cmd_params->dest_type;
23185 +       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
23186 +       attr->fqid = le32_to_cpu(cmd_params->fqid);
23187 +       attr->order_preservation_en =
23188 +               le32_to_cpu(cmd_params->order_preservation_en);
23189 +
23190 +       return 0;
23191 +}
23192 +
23193 +/**
23194 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
23195 + * @mc_io:     Pointer to MC portal's I/O object
23196 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23197 + * @token:     Token of DPSECI object
23198 + * @queue:     Select the queue relative to the number of priorities configured at
23199 + *             DPSECI creation
23200 + * @attr:      Returned Tx queue attributes
23201 + *
23202 + * Return:     '0' on success, error code otherwise
23203 + */
23204 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23205 +                       u8 queue, struct dpseci_tx_queue_attr *attr)
23206 +{
23207 +       struct mc_command cmd = { 0 };
23208 +       struct dpseci_cmd_queue *cmd_params;
23209 +       struct dpseci_rsp_get_tx_queue *rsp_params;
23210 +       int err;
23211 +
23212 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
23213 +                                         cmd_flags,
23214 +                                         token);
23215 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23216 +       cmd_params->queue = queue;
23217 +       err = mc_send_command(mc_io, &cmd);
23218 +       if (err)
23219 +               return err;
23220 +
23221 +       rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
23222 +       attr->fqid = le32_to_cpu(rsp_params->fqid);
23223 +       attr->priority = rsp_params->priority;
23224 +
23225 +       return 0;
23226 +}
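/*
 * Queue-wiring sketch: steer every Rx queue to a DPIO notification target
 * and read back the first Tx FQID for enqueue. DPSECI_QUEUE_OPT_DEST,
 * DPSECI_QUEUE_OPT_USER_CTX and DPSECI_DEST_DPIO are assumed from dpseci.h;
 * dpio_id, ctx and the priority are illustrative.
 */
static int example_dpseci_wire_queues(struct fsl_mc_io *mc_io, u16 token,
				      int dpio_id, u64 ctx, u32 *tx_fqid)
{
	struct dpseci_rx_queue_cfg rx_cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = ctx,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 1,
		},
	};
	struct dpseci_tx_queue_attr tx_attr;
	int err;

	err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);
	if (err)
		return err;

	err = dpseci_get_tx_queue(mc_io, 0, token, 0, &tx_attr);
	if (err)
		return err;

	*tx_fqid = tx_attr.fqid;
	return 0;
}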
23227 +
23228 +/**
23229 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
23230 + * @mc_io:     Pointer to MC portal's I/O object
23231 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23232 + * @token:     Token of DPSECI object
23233 + * @attr:      Returned SEC attributes
23234 + *
23235 + * Return:     '0' on success, error code otherwise
23236 + */
23237 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23238 +                       struct dpseci_sec_attr *attr)
23239 +{
23240 +       struct mc_command cmd = { 0 };
23241 +       struct dpseci_rsp_get_sec_attr *rsp_params;
23242 +       int err;
23243 +
23244 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
23245 +                                         cmd_flags,
23246 +                                         token);
23247 +       err = mc_send_command(mc_io, &cmd);
23248 +       if (err)
23249 +               return err;
23250 +
23251 +       rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
23252 +       attr->ip_id = le16_to_cpu(rsp_params->ip_id);
23253 +       attr->major_rev = rsp_params->major_rev;
23254 +       attr->minor_rev = rsp_params->minor_rev;
23255 +       attr->era = rsp_params->era;
23256 +       attr->deco_num = rsp_params->deco_num;
23257 +       attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
23258 +       attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
23259 +       attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
23260 +       attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
23261 +       attr->crc_acc_num = rsp_params->crc_acc_num;
23262 +       attr->pk_acc_num = rsp_params->pk_acc_num;
23263 +       attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
23264 +       attr->rng_acc_num = rsp_params->rng_acc_num;
23265 +       attr->md_acc_num = rsp_params->md_acc_num;
23266 +       attr->arc4_acc_num = rsp_params->arc4_acc_num;
23267 +       attr->des_acc_num = rsp_params->des_acc_num;
23268 +       attr->aes_acc_num = rsp_params->aes_acc_num;
23269 +
23270 +       return 0;
23271 +}
23272 +
23273 +/**
23274 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
23275 + * @mc_io:     Pointer to MC portal's I/O object
23276 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23277 + * @token:     Token of DPSECI object
23278 + * @counters:  Returned SEC counters
23279 + *
23280 + * Return:     '0' on success, error code otherwise
23281 + */
23282 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23283 +                           struct dpseci_sec_counters *counters)
23284 +{
23285 +       struct mc_command cmd = { 0 };
23286 +       struct dpseci_rsp_get_sec_counters *rsp_params;
23287 +       int err;
23288 +
23289 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
23290 +                                         cmd_flags,
23291 +                                         token);
23292 +       err = mc_send_command(mc_io, &cmd);
23293 +       if (err)
23294 +               return err;
23295 +
23296 +       rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
23297 +       counters->dequeued_requests =
23298 +               le64_to_cpu(rsp_params->dequeued_requests);
23299 +       counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
23300 +       counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
23301 +       counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
23302 +       counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
23303 +       counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
23304 +       counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
23305 +
23306 +       return 0;
23307 +}
23308 +
23309 +/**
23310 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
23311 + * @mc_io:     Pointer to MC portal's I/O object
23312 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23313 + * @major_ver: Major version of data path sec API
23314 + * @minor_ver: Minor version of data path sec API
23315 + *
23316 + * Return:     '0' on success, error code otherwise
23317 + */
23318 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23319 +                          u16 *major_ver, u16 *minor_ver)
23320 +{
23321 +       struct mc_command cmd = { 0 };
23322 +       struct dpseci_rsp_get_api_version *rsp_params;
23323 +       int err;
23324 +
23325 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
23326 +                                         cmd_flags, 0);
23327 +       err = mc_send_command(mc_io, &cmd);
23328 +       if (err)
23329 +               return err;
23330 +
23331 +       rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
23332 +       *major_ver = le16_to_cpu(rsp_params->major);
23333 +       *minor_ver = le16_to_cpu(rsp_params->minor);
23334 +
23335 +       return 0;
23336 +}
23337 +
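A typical caller probes the firmware once at init time and compares the reported version against the command set it was built for (DPSECI_VER_MAJOR/MINOR from dpseci_cmd.h). A minimal sketch (assuming `mc_io` is a valid MC portal and `dev` is the caller's device, both hypothetical here; how a mismatch is handled is up to the driver):

	u16 major, minor;
	int err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major != DPSECI_VER_MAJOR)
		dev_warn(dev, "DPSECI API %u.%u, driver built for %u.%u\n",
			 major, minor, DPSECI_VER_MAJOR, DPSECI_VER_MINOR);
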
23338 +/**
23339 + * dpseci_set_opr() - Set Order Restoration configuration
23340 + * @mc_io:     Pointer to MC portal's I/O object
23341 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23342 + * @token:     Token of DPSECI object
23343 + * @index:     The queue index
23344 + * @options:   Configuration mode options; can be OPR_OPT_CREATE or
23345 + *             OPR_OPT_RETIRE
23346 + * @cfg:       Configuration options for the OPR
23347 + *
23348 + * Return:     '0' on success, error code otherwise
23349 + */
23350 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23351 +                  u8 options, struct opr_cfg *cfg)
23352 +{
23353 +       struct mc_command cmd = { 0 };
23354 +       struct dpseci_cmd_opr *cmd_params;
23355 +
23356 +       cmd.header = mc_encode_cmd_header(
23357 +                       DPSECI_CMDID_SET_OPR,
23358 +                       cmd_flags,
23359 +                       token);
23360 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23361 +       cmd_params->index = index;
23362 +       cmd_params->options = options;
23363 +       cmd_params->oloe = cfg->oloe;
23364 +       cmd_params->oeane = cfg->oeane;
23365 +       cmd_params->olws = cfg->olws;
23366 +       cmd_params->oa = cfg->oa;
23367 +       cmd_params->oprrws = cfg->oprrws;
23368 +
23369 +       return mc_send_command(mc_io, &cmd);
23370 +}
23371 +
23372 +/**
23373 + * dpseci_get_opr() - Retrieve Order Restoration config and query
23374 + * @mc_io:     Pointer to MC portal's I/O object
23375 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23376 + * @token:     Token of DPSECI object
23377 + * @index:     The queue index
23378 + * @cfg:       Returned OPR configuration
23379 + * @qry:       Returned OPR query
23380 + *
23381 + * Return:     '0' on success, error code otherwise
23382 + */
23383 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23384 +                  struct opr_cfg *cfg, struct opr_qry *qry)
23385 +{
23386 +       struct mc_command cmd = { 0 };
23387 +       struct dpseci_cmd_opr *cmd_params;
23388 +       struct dpseci_rsp_get_opr *rsp_params;
23389 +       int err;
23390 +
23391 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
23392 +                                         cmd_flags,
23393 +                                         token);
23394 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23395 +       cmd_params->index = index;
23396 +       err = mc_send_command(mc_io, &cmd);
23397 +       if (err)
23398 +               return err;
23399 +
23400 +       rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
23401 +       qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
23402 +       qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
23403 +       cfg->oloe = rsp_params->oloe;
23404 +       cfg->oeane = rsp_params->oeane;
23405 +       cfg->olws = rsp_params->olws;
23406 +       cfg->oa = rsp_params->oa;
23407 +       cfg->oprrws = rsp_params->oprrws;
23408 +       qry->nesn = le16_to_cpu(rsp_params->nesn);
23409 +       qry->ndsn = le16_to_cpu(rsp_params->ndsn);
23410 +       qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
23411 +       qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
23412 +       qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
23413 +       qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
23414 +       qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
23415 +       qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
23416 +       qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
23417 +       qry->opr_id = le16_to_cpu(rsp_params->opr_id);
23418 +
23419 +       return 0;
23420 +}
23421 +
23422 +/**
23423 + * dpseci_set_congestion_notification() - Set congestion group
23424 + *     notification configuration
23425 + * @mc_io:     Pointer to MC portal's I/O object
23426 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23427 + * @token:     Token of DPSECI object
23428 + * @cfg:       congestion notification configuration
23429 + *
23430 + * Return:     '0' on success, error code otherwise
23431 + */
23432 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23433 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg)
23434 +{
23435 +       struct mc_command cmd = { 0 };
23436 +       struct dpseci_cmd_congestion_notification *cmd_params;
23437 +
23438 +       cmd.header = mc_encode_cmd_header(
23439 +                       DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
23440 +                       cmd_flags,
23441 +                       token);
23442 +       cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23443 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23444 +       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
23445 +       cmd_params->priority = cfg->dest_cfg.priority;
23446 +       dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
23447 +                        cfg->dest_cfg.dest_type);
23448 +       dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
23449 +       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
23450 +       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
23451 +       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
23452 +       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
23453 +
23454 +       return mc_send_command(mc_io, &cmd);
23455 +}
23456 +
23457 +/**
23458 + * dpseci_get_congestion_notification() - Get congestion group notification
23459 + *     configuration
23460 + * @mc_io:     Pointer to MC portal's I/O object
23461 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23462 + * @token:     Token of DPSECI object
23463 + * @cfg:       congestion notification configuration
23464 + *
23465 + * Return:     '0' on success, error code otherwise
23466 + */
23467 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23468 +       u16 token, struct dpseci_congestion_notification_cfg *cfg)
23469 +{
23470 +       struct mc_command cmd = { 0 };
23471 +       struct dpseci_cmd_congestion_notification *rsp_params;
23472 +       int err;
23473 +
23474 +       cmd.header = mc_encode_cmd_header(
23475 +                       DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
23476 +                       cmd_flags,
23477 +                       token);
23478 +       err = mc_send_command(mc_io, &cmd);
23479 +       if (err)
23480 +               return err;
23481 +
23482 +       rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23483 +       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
23484 +       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
23485 +       cfg->dest_cfg.priority = rsp_params->priority;
23486 +       cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
23487 +                                                  CGN_DEST_TYPE);
23488 +       cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
23489 +       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
23490 +       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
23491 +       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
23492 +       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
23493 +
23494 +       return 0;
23495 +}
23496 diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
23497 new file mode 100644
23498 index 00000000..d37489c6
23499 --- /dev/null
23500 +++ b/drivers/crypto/caam/dpseci.h
23501 @@ -0,0 +1,395 @@
23502 +/*
23503 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23504 + * Copyright 2017 NXP
23505 + *
23506 + * Redistribution and use in source and binary forms, with or without
23507 + * modification, are permitted provided that the following conditions are met:
23508 + *     * Redistributions of source code must retain the above copyright
23509 + *      notice, this list of conditions and the following disclaimer.
23510 + *     * Redistributions in binary form must reproduce the above copyright
23511 + *      notice, this list of conditions and the following disclaimer in the
23512 + *      documentation and/or other materials provided with the distribution.
23513 + *     * Neither the names of the above-listed copyright holders nor the
23514 + *      names of any contributors may be used to endorse or promote products
23515 + *      derived from this software without specific prior written permission.
23516 + *
23517 + *
23518 + * ALTERNATIVELY, this software may be distributed under the terms of the
23519 + * GNU General Public License ("GPL") as published by the Free Software
23520 + * Foundation, either version 2 of that License or (at your option) any
23521 + * later version.
23522 + *
23523 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23524 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23525 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23526 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23527 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23528 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23529 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23530 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23531 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23532 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23533 + * POSSIBILITY OF SUCH DAMAGE.
23534 + */
23535 +#ifndef _DPSECI_H_
23536 +#define _DPSECI_H_
23537 +
23538 +/*
23539 + * Data Path SEC Interface API
23540 + * Contains initialization APIs and runtime control APIs for DPSECI
23541 + */
23542 +
23543 +struct fsl_mc_io;
23544 +struct opr_cfg;
23545 +struct opr_qry;
23546 +
23547 +/**
23548 + * General DPSECI macros
23549 + */
23550 +
23551 +/**
23552 + * Maximum number of Tx/Rx priorities per DPSECI object
23553 + */
23554 +#define DPSECI_PRIO_NUM                8
23555 +
23556 +/**
23557 + * All queues considered; see dpseci_set_rx_queue()
23558 + */
23559 +#define DPSECI_ALL_QUEUES      (u8)(-1)
23560 +
23561 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
23562 +               u16 *token);
23563 +
23564 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23565 +
23566 +/**
23567 + * Enable the Congestion Group support
23568 + */
23569 +#define DPSECI_OPT_HAS_CG              0x000020
23570 +
23571 +/**
23572 + * Enable the Order Restoration support
23573 + */
23574 +#define DPSECI_OPT_HAS_OPR             0x000040
23575 +
23576 +/**
23577 + * Order Point Records are shared for the entire DPSECI
23578 + */
23579 +#define DPSECI_OPT_OPR_SHARED          0x000080
23580 +
23581 +/**
23582 + * struct dpseci_cfg - Structure representing DPSECI configuration
23583 + * @options: Any combination of the following options:
23584 + *             DPSECI_OPT_HAS_CG
23585 + *             DPSECI_OPT_HAS_OPR
23586 + *             DPSECI_OPT_OPR_SHARED
23587 + * @num_tx_queues: number of queues towards the SEC
23588 + * @num_rx_queues: number of queues back from the SEC
23589 + * @priorities: Priorities for the SEC hardware processing;
23590 + *             each entry in the array is the priority of the Tx queue
23591 + *             towards the SEC;
23592 + *             valid priorities are configured with values 1-8
23593 + */
23594 +struct dpseci_cfg {
23595 +       u32 options;
23596 +       u8 num_tx_queues;
23597 +       u8 num_rx_queues;
23598 +       u8 priorities[DPSECI_PRIO_NUM];
23599 +};
23600 +
23601 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23602 +                 const struct dpseci_cfg *cfg, u32 *obj_id);
23603 +
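For orientation, object creation, open and enable chain together as below. This is a minimal sketch, not the driver's actual probe path: `mc_io` and `dprc_token` are assumed to come from the fsl-mc bus, teardown on the error paths is elided, and the queue counts and priorities are arbitrary:

	struct dpseci_cfg cfg = {
		.options = DPSECI_OPT_HAS_CG,
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },	/* one entry per Tx queue, range 1-8 */
	};
	u32 obj_id;
	u16 token;
	int err;

	err = dpseci_create(mc_io, dprc_token, 0, &cfg, &obj_id);
	if (err)
		return err;

	err = dpseci_open(mc_io, 0, obj_id, &token);
	if (err)
		return err;

	return dpseci_enable(mc_io, 0, token);
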
23604 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23605 +                  u32 object_id);
23606 +
23607 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23608 +
23609 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23610 +
23611 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23612 +                     int *en);
23613 +
23614 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23615 +
23616 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23617 +                         u8 irq_index, u8 *en);
23618 +
23619 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23620 +                         u8 irq_index, u8 en);
23621 +
23622 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23623 +                       u8 irq_index, u32 *mask);
23624 +
23625 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23626 +                       u8 irq_index, u32 mask);
23627 +
23628 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23629 +                         u8 irq_index, u32 *status);
23630 +
23631 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23632 +                           u8 irq_index, u32 status);
23633 +
23634 +/**
23635 + * struct dpseci_attr - Structure representing DPSECI attributes
23636 + * @id: DPSECI object ID
23637 + * @num_tx_queues: number of queues towards the SEC
23638 + * @num_rx_queues: number of queues back from the SEC
23639 + * @options: any combination of the following options:
23640 + *             DPSECI_OPT_HAS_CG
23641 + *             DPSECI_OPT_HAS_OPR
23642 + *             DPSECI_OPT_OPR_SHARED
23643 + */
23644 +struct dpseci_attr {
23645 +       int id;
23646 +       u8 num_tx_queues;
23647 +       u8 num_rx_queues;
23648 +       u32 options;
23649 +};
23650 +
23651 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23652 +                         struct dpseci_attr *attr);
23653 +
23654 +/**
23655 + * enum dpseci_dest - DPSECI destination types
23656 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
23657 + *     and does not generate FQDAN notifications; user is expected to dequeue
23658 + *     from the queue based on polling or other user-defined method
23659 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
23660 + *     notifications to the specified DPIO; user is expected to dequeue from
23661 + *     the queue only after notification is received
23662 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
23663 + *     FQDAN notifications, but is connected to the specified DPCON object;
23664 + *     user is expected to dequeue from the DPCON channel
23665 + */
23666 +enum dpseci_dest {
23667 +       DPSECI_DEST_NONE = 0,
23668 +       DPSECI_DEST_DPIO,
23669 +       DPSECI_DEST_DPCON
23670 +};
23671 +
23672 +/**
23673 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
23674 + * @dest_type: Destination type
23675 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
23676 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
23677 + *     are 0-1 or 0-7, depending on the number of priorities in that channel;
23678 + *     not relevant for 'DPSECI_DEST_NONE' option
23679 + */
23680 +struct dpseci_dest_cfg {
23681 +       enum dpseci_dest dest_type;
23682 +       int dest_id;
23683 +       u8 priority;
23684 +};
23685 +
23686 +/**
23687 + * DPSECI queue modification options
23688 + */
23689 +
23690 +/**
23691 + * Select to modify the user's context associated with the queue
23692 + */
23693 +#define DPSECI_QUEUE_OPT_USER_CTX              0x00000001
23694 +
23695 +/**
23696 + * Select to modify the queue's destination
23697 + */
23698 +#define DPSECI_QUEUE_OPT_DEST                  0x00000002
23699 +
23700 +/**
23701 + * Select to modify the queue's order preservation
23702 + */
23703 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION    0x00000004
23704 +
23705 +/**
23706 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
23707 + * @options: Flags representing the suggested modifications to the queue;
23708 + *     Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
23709 + * @order_preservation_en: order preservation configuration for the Rx queue;
23710 + *     valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
23711 + * @user_ctx: User context value provided in the frame descriptor of each
23712 + *     dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
23713 + *     in 'options'
23714 + * @dest_cfg: Queue destination parameters; valid only if
23715 + *     'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
23716 + */
23717 +struct dpseci_rx_queue_cfg {
23718 +       u32 options;
23719 +       int order_preservation_en;
23720 +       u64 user_ctx;
23721 +       struct dpseci_dest_cfg dest_cfg;
23722 +};
23723 +
23724 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23725 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg);
23726 +
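Putting the pieces together, pointing an Rx queue at a DPIO so that dequeued frames raise FQDAN notifications looks roughly like this (a sketch: `dpio_id` and the `priv` context pointer are hypothetical, and only the fields selected by 'options' are consumed):

	struct dpseci_rx_queue_cfg rx_cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = (u64)(uintptr_t)priv, /* echoed in each dequeued FD */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	err = dpseci_set_rx_queue(mc_io, 0, token, 0 /* queue index */, &rx_cfg);
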
23727 +/**
23728 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
23729 + * @user_ctx: User context value provided in the frame descriptor of each
23730 + *     dequeued frame
23731 + * @order_preservation_en: Status of the order preservation configuration on the
23732 + *     queue
23733 + * @dest_cfg: Queue destination configuration
23734 + * @fqid: Virtual FQID value to be used for dequeue operations
23735 + */
23736 +struct dpseci_rx_queue_attr {
23737 +       u64 user_ctx;
23738 +       int order_preservation_en;
23739 +       struct dpseci_dest_cfg dest_cfg;
23740 +       u32 fqid;
23741 +};
23742 +
23743 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23744 +                       u8 queue, struct dpseci_rx_queue_attr *attr);
23745 +
23746 +/**
23747 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
23748 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
23749 + * @priority: SEC hardware processing priority for the queue
23750 + */
23751 +struct dpseci_tx_queue_attr {
23752 +       u32 fqid;
23753 +       u8 priority;
23754 +};
23755 +
23756 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23757 +                       u8 queue, struct dpseci_tx_queue_attr *attr);
23758 +
23759 +/**
23760 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
23761 + *     hardware accelerator
23762 + * @ip_id: ID for SEC
23763 + * @major_rev: Major revision number for SEC
23764 + * @minor_rev: Minor revision number for SEC
23765 + * @era: SEC Era
23766 + * @deco_num: The number of copies of the DECO that are implemented in this
23767 + *     version of SEC
23768 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
23769 + *     version of SEC
23770 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
23771 + *     version of SEC
23772 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
23773 + *     implemented in this version of SEC
23774 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
23775 + *     implemented in this version of SEC
23776 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
23777 + *     this version of SEC
23778 + * @pk_acc_num:  The number of copies of the Public Key module that are
23779 + *     implemented in this version of SEC
23780 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
23781 + *     implemented in this version of SEC
23782 + * @rng_acc_num: The number of copies of the Random Number Generator that are
23783 + *     implemented in this version of SEC
23784 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
23785 + *     implemented in this version of SEC
23786 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
23787 + *     in this version of SEC
23788 + * @des_acc_num: The number of copies of the DES module that are implemented in
23789 + *     this version of SEC
23790 + * @aes_acc_num: The number of copies of the AES module that are implemented in
23791 + *     this version of SEC
23792 + */
23793 +struct dpseci_sec_attr {
23794 +       u16 ip_id;
23795 +       u8 major_rev;
23796 +       u8 minor_rev;
23797 +       u8 era;
23798 +       u8 deco_num;
23799 +       u8 zuc_auth_acc_num;
23800 +       u8 zuc_enc_acc_num;
23801 +       u8 snow_f8_acc_num;
23802 +       u8 snow_f9_acc_num;
23803 +       u8 crc_acc_num;
23804 +       u8 pk_acc_num;
23805 +       u8 kasumi_acc_num;
23806 +       u8 rng_acc_num;
23807 +       u8 md_acc_num;
23808 +       u8 arc4_acc_num;
23809 +       u8 des_acc_num;
23810 +       u8 aes_acc_num;
23811 +};
23812 +
23813 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23814 +                       struct dpseci_sec_attr *attr);
23815 +
23816 +/**
23817 + * struct dpseci_sec_counters - Structure representing global SEC counters
23818 + *                             (not per-DPSECI counters)
23819 + * @dequeued_requests: Number of Requests Dequeued
23820 + * @ob_enc_requests:   Number of Outbound Encrypt Requests
23821 + * @ib_dec_requests:   Number of Inbound Decrypt Requests
23822 + * @ob_enc_bytes:      Number of Outbound Bytes Encrypted
23823 + * @ob_prot_bytes:     Number of Outbound Bytes Protected
23824 + * @ib_dec_bytes:      Number of Inbound Bytes Decrypted
23825 + * @ib_valid_bytes:    Number of Inbound Bytes Validated
23826 + */
23827 +struct dpseci_sec_counters {
23828 +       u64 dequeued_requests;
23829 +       u64 ob_enc_requests;
23830 +       u64 ib_dec_requests;
23831 +       u64 ob_enc_bytes;
23832 +       u64 ob_prot_bytes;
23833 +       u64 ib_dec_bytes;
23834 +       u64 ib_valid_bytes;
23835 +};
23836 +
23837 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23838 +                           struct dpseci_sec_counters *counters);
23839 +
23840 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23841 +                          u16 *major_ver, u16 *minor_ver);
23842 +
23843 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23844 +                  u8 options, struct opr_cfg *cfg);
23845 +
23846 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23847 +                  struct opr_cfg *cfg, struct opr_qry *qry);
23848 +
23849 +/**
23850 + * enum dpseci_congestion_unit - DPSECI congestion units
23851 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
23852 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
23853 + */
23854 +enum dpseci_congestion_unit {
23855 +       DPSECI_CONGESTION_UNIT_BYTES = 0,
23856 +       DPSECI_CONGESTION_UNIT_FRAMES
23857 +};
23858 +
23859 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER             0x00000001
23860 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT              0x00000002
23861 +#define DPSECI_CGN_MODE_COHERENT_WRITE                 0x00000004
23862 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER           0x00000008
23863 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT            0x00000010
23864 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED       0x00000020
23865 +
23866 +/**
23867 + * struct dpseci_congestion_notification_cfg - congestion notification
23868 + *     configuration
23869 + * @units: units type
23870 + * @threshold_entry: above this threshold we enter a congestion state;
23871 + *     set it to '0' to disable congestion notifications
23872 + * @threshold_exit: below this threshold we exit the congestion state
23873 + * @message_ctx: The context that will be part of the CSCN message
23874 + * @message_iova: I/O virtual address (must be in DMA-able memory),
23875 + *     must be 16B aligned
23876 + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
23877 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
23878 + *     values
23879 + */
23880 +struct dpseci_congestion_notification_cfg {
23881 +       enum dpseci_congestion_unit units;
23882 +       u32 threshold_entry;
23883 +       u32 threshold_exit;
23884 +       u64 message_ctx;
23885 +       u64 message_iova;
23886 +       struct dpseci_dest_cfg dest_cfg;
23887 +       u16 notification_mode;
23888 +};
23889 +
23890 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23891 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg);
23892 +
23893 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23894 +       u16 token, struct dpseci_congestion_notification_cfg *cfg);
23895 +
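To illustrate the congestion interface, the sketch below arms a memory-write CSCN with hysteresis (enter at 1024 frames, exit at 512). `cscn_iova` is a hypothetical 16-byte-aligned DMA address the caller is assumed to have mapped; the threshold values are illustrative:

	struct dpseci_congestion_notification_cfg cgn = {
		.units = DPSECI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 1024,
		.threshold_exit = 512,
		.message_iova = cscn_iova,
		.message_ctx = 0,
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
	};

	err = dpseci_set_congestion_notification(mc_io, 0, token, &cgn);
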
23896 +#endif /* _DPSECI_H_ */
23897 diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
23898 new file mode 100644
23899 index 00000000..7624315e
23900 --- /dev/null
23901 +++ b/drivers/crypto/caam/dpseci_cmd.h
23902 @@ -0,0 +1,261 @@
23903 +/*
23904 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23905 + * Copyright 2017 NXP
23906 + *
23907 + * Redistribution and use in source and binary forms, with or without
23908 + * modification, are permitted provided that the following conditions are met:
23909 + *     * Redistributions of source code must retain the above copyright
23910 + *      notice, this list of conditions and the following disclaimer.
23911 + *     * Redistributions in binary form must reproduce the above copyright
23912 + *      notice, this list of conditions and the following disclaimer in the
23913 + *      documentation and/or other materials provided with the distribution.
23914 + *     * Neither the names of the above-listed copyright holders nor the
23915 + *      names of any contributors may be used to endorse or promote products
23916 + *      derived from this software without specific prior written permission.
23917 + *
23918 + *
23919 + * ALTERNATIVELY, this software may be distributed under the terms of the
23920 + * GNU General Public License ("GPL") as published by the Free Software
23921 + * Foundation, either version 2 of that License or (at your option) any
23922 + * later version.
23923 + *
23924 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23925 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23926 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23927 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23928 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23929 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23930 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23931 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23932 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23933 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23934 + * POSSIBILITY OF SUCH DAMAGE.
23935 + */
23936 +
23937 +#ifndef _DPSECI_CMD_H_
23938 +#define _DPSECI_CMD_H_
23939 +
23940 +/* DPSECI Version */
23941 +#define DPSECI_VER_MAJOR                               5
23942 +#define DPSECI_VER_MINOR                               1
23943 +
23944 +#define DPSECI_VER(maj, min)   (((maj) << 16) | (min))
23945 +#define DPSECI_VERSION         DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
23946 +
23947 +/* Command IDs */
23948 +
23949 +#define DPSECI_CMDID_CLOSE                              0x8001
23950 +#define DPSECI_CMDID_OPEN                               0x8091
23951 +#define DPSECI_CMDID_CREATE                             0x9092
23952 +#define DPSECI_CMDID_DESTROY                            0x9891
23953 +#define DPSECI_CMDID_GET_API_VERSION                    0xa091
23954 +
23955 +#define DPSECI_CMDID_ENABLE                             0x0021
23956 +#define DPSECI_CMDID_DISABLE                            0x0031
23957 +#define DPSECI_CMDID_GET_ATTR                           0x0041
23958 +#define DPSECI_CMDID_RESET                              0x0051
23959 +#define DPSECI_CMDID_IS_ENABLED                         0x0061
23960 +
23961 +#define DPSECI_CMDID_SET_IRQ_ENABLE                     0x0121
23962 +#define DPSECI_CMDID_GET_IRQ_ENABLE                     0x0131
23963 +#define DPSECI_CMDID_SET_IRQ_MASK                       0x0141
23964 +#define DPSECI_CMDID_GET_IRQ_MASK                       0x0151
23965 +#define DPSECI_CMDID_GET_IRQ_STATUS                     0x0161
23966 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS                   0x0171
23967 +
23968 +#define DPSECI_CMDID_SET_RX_QUEUE                       0x1941
23969 +#define DPSECI_CMDID_GET_RX_QUEUE                       0x1961
23970 +#define DPSECI_CMDID_GET_TX_QUEUE                       0x1971
23971 +#define DPSECI_CMDID_GET_SEC_ATTR                       0x1981
23972 +#define DPSECI_CMDID_GET_SEC_COUNTERS                   0x1991
23973 +#define DPSECI_CMDID_SET_OPR                           0x19a1
23974 +#define DPSECI_CMDID_GET_OPR                           0x19b1
23975 +
23976 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION       0x1701
23977 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION       0x1711
23978 +
23979 +/* Macros for accessing command fields smaller than 1 byte */
23980 +#define DPSECI_MASK(field)     \
23981 +       GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1,     \
23982 +               DPSECI_##field##_SHIFT)
23983 +
23984 +#define dpseci_set_field(var, field, val)      \
23985 +       ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
23986 +
23987 +#define dpseci_get_field(var, field)   \
23988 +       (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
23989 +
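As a worked expansion: with DPSECI_CGN_UNITS_SHIFT = 4 and DPSECI_CGN_UNITS_SIZE = 2 (defined later in this header), the accessors reduce to a plain mask-and-shift on bits 5:4:

	dpseci_set_field(opts, CGN_UNITS, val);
	/* -> opts |= ((val << 4) & GENMASK(5, 4)); */

	units = dpseci_get_field(opts, CGN_UNITS);
	/* -> units = (opts & GENMASK(5, 4)) >> 4; */
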
23990 +struct dpseci_cmd_open {
23991 +       __le32 dpseci_id;
23992 +};
23993 +
23994 +struct dpseci_cmd_create {
23995 +       u8 priorities[8];
23996 +       u8 num_tx_queues;
23997 +       u8 num_rx_queues;
23998 +       __le16 pad;
23999 +       __le32 options;
24000 +};
24001 +
24002 +struct dpseci_cmd_destroy {
24003 +       __le32 object_id;
24004 +};
24005 +
24006 +struct dpseci_rsp_is_enabled {
24007 +       __le32 is_enabled;
24008 +};
24009 +
24010 +struct dpseci_cmd_irq_enable {
24011 +       u8 enable_state;
24012 +       u8 pad[3];
24013 +       u8 irq_index;
24014 +};
24015 +
24016 +struct dpseci_rsp_get_irq_enable {
24017 +       u8 enable_state;
24018 +};
24019 +
24020 +struct dpseci_cmd_irq_mask {
24021 +       __le32 mask;
24022 +       u8 irq_index;
24023 +};
24024 +
24025 +struct dpseci_cmd_irq_status {
24026 +       __le32 status;
24027 +       u8 irq_index;
24028 +};
24029 +
24030 +struct dpseci_rsp_get_attributes {
24031 +       __le32 id;
24032 +       __le32 pad0;
24033 +       u8 num_tx_queues;
24034 +       u8 num_rx_queues;
24035 +       u8 pad1[6];
24036 +       __le32 options;
24037 +};
24038 +
24039 +struct dpseci_cmd_queue {
24040 +       __le32 dest_id;
24041 +       u8 priority;
24042 +       u8 queue;
24043 +       u8 dest_type;
24044 +       u8 pad;
24045 +       __le64 user_ctx;
24046 +       union {
24047 +               __le32 options;
24048 +               __le32 fqid;
24049 +       };
24050 +       __le32 order_preservation_en;
24051 +};
24052 +
24053 +struct dpseci_rsp_get_tx_queue {
24054 +       __le32 pad;
24055 +       __le32 fqid;
24056 +       u8 priority;
24057 +};
24058 +
24059 +struct dpseci_rsp_get_sec_attr {
24060 +       __le16 ip_id;
24061 +       u8 major_rev;
24062 +       u8 minor_rev;
24063 +       u8 era;
24064 +       u8 pad0[3];
24065 +       u8 deco_num;
24066 +       u8 zuc_auth_acc_num;
24067 +       u8 zuc_enc_acc_num;
24068 +       u8 pad1;
24069 +       u8 snow_f8_acc_num;
24070 +       u8 snow_f9_acc_num;
24071 +       u8 crc_acc_num;
24072 +       u8 pad2;
24073 +       u8 pk_acc_num;
24074 +       u8 kasumi_acc_num;
24075 +       u8 rng_acc_num;
24076 +       u8 pad3;
24077 +       u8 md_acc_num;
24078 +       u8 arc4_acc_num;
24079 +       u8 des_acc_num;
24080 +       u8 aes_acc_num;
24081 +};
24082 +
24083 +struct dpseci_rsp_get_sec_counters {
24084 +       __le64 dequeued_requests;
24085 +       __le64 ob_enc_requests;
24086 +       __le64 ib_dec_requests;
24087 +       __le64 ob_enc_bytes;
24088 +       __le64 ob_prot_bytes;
24089 +       __le64 ib_dec_bytes;
24090 +       __le64 ib_valid_bytes;
24091 +};
24092 +
24093 +struct dpseci_rsp_get_api_version {
24094 +       __le16 major;
24095 +       __le16 minor;
24096 +};
24097 +
24098 +struct dpseci_cmd_opr {
24099 +       __le16 pad;
24100 +       u8 index;
24101 +       u8 options;
24102 +       u8 pad1[7];
24103 +       u8 oloe;
24104 +       u8 oeane;
24105 +       u8 olws;
24106 +       u8 oa;
24107 +       u8 oprrws;
24108 +};
24109 +
24110 +#define DPSECI_OPR_RIP_SHIFT           0
24111 +#define DPSECI_OPR_RIP_SIZE            1
24112 +#define DPSECI_OPR_ENABLE_SHIFT                1
24113 +#define DPSECI_OPR_ENABLE_SIZE         1
24114 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT     1
24115 +#define DPSECI_OPR_TSEQ_NLIS_SIZE      1
24116 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT     1
24117 +#define DPSECI_OPR_HSEQ_NLIS_SIZE      1
24118 +
24119 +struct dpseci_rsp_get_opr {
24120 +       __le64 pad;
24121 +       u8 rip_enable;
24122 +       u8 pad0[2];
24123 +       u8 oloe;
24124 +       u8 oeane;
24125 +       u8 olws;
24126 +       u8 oa;
24127 +       u8 oprrws;
24128 +       __le16 nesn;
24129 +       __le16 pad1;
24130 +       __le16 ndsn;
24131 +       __le16 pad2;
24132 +       __le16 ea_tseq;
24133 +       u8 tseq_nlis;
24134 +       u8 pad3;
24135 +       __le16 ea_hseq;
24136 +       u8 hseq_nlis;
24137 +       u8 pad4;
24138 +       __le16 ea_hptr;
24139 +       __le16 pad5;
24140 +       __le16 ea_tptr;
24141 +       __le16 pad6;
24142 +       __le16 opr_vid;
24143 +       __le16 pad7;
24144 +       __le16 opr_id;
24145 +};
24146 +
24147 +#define DPSECI_CGN_DEST_TYPE_SHIFT     0
24148 +#define DPSECI_CGN_DEST_TYPE_SIZE      4
24149 +#define DPSECI_CGN_UNITS_SHIFT         4
24150 +#define DPSECI_CGN_UNITS_SIZE          2
24151 +
24152 +struct dpseci_cmd_congestion_notification {
24153 +       __le32 dest_id;
24154 +       __le16 notification_mode;
24155 +       u8 priority;
24156 +       u8 options;
24157 +       __le64 message_iova;
24158 +       __le64 message_ctx;
24159 +       __le32 threshold_entry;
24160 +       __le32 threshold_exit;
24161 +};
24162 +
24163 +#endif /* _DPSECI_CMD_H_ */
24164 diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
24165 index 33e41ea8..31963397 100644
24166 --- a/drivers/crypto/caam/error.c
24167 +++ b/drivers/crypto/caam/error.c
24168 @@ -6,11 +6,54 @@
24169  
24170  #include "compat.h"
24171  #include "regs.h"
24172 -#include "intern.h"
24173  #include "desc.h"
24174 -#include "jr.h"
24175  #include "error.h"
24176  
24177 +#ifdef DEBUG
24178 +
24179 +#include <linux/highmem.h>
24180 +
24181 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24182 +                 int rowsize, int groupsize, struct scatterlist *sg,
24183 +                 size_t tlen, bool ascii)
24184 +{
24185 +       struct scatterlist *it;
24186 +       void *it_page;
24187 +       size_t len;
24188 +       void *buf;
24189 +
24190 +       for (it = sg; it && tlen > 0; it = sg_next(it)) {
24191 +               /*
24192 +                * make sure the scatterlist's page
24193 +                * has a valid virtual memory mapping
24194 +                */
24195 +               it_page = kmap_atomic(sg_page(it));
24196 +               if (unlikely(!it_page)) {
24197 +                       pr_err("caam_dump_sg: kmap failed\n");
24198 +                       return;
24199 +               }
24200 +
24201 +               buf = it_page + it->offset;
24202 +               len = min_t(size_t, tlen, it->length);
24203 +               print_hex_dump(level, prefix_str, prefix_type, rowsize,
24204 +                              groupsize, buf, len, ascii);
24205 +               tlen -= len;
24206 +
24207 +               kunmap_atomic(it_page);
24208 +       }
24209 +}
24210 +
24211 +#else
24212 +
24213 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24214 +                 int rowsize, int groupsize, struct scatterlist *sg,
24215 +                 size_t tlen, bool ascii)
24216 +{}
24217 +
24218 +#endif
24219 +
24220 +EXPORT_SYMBOL(caam_dump_sg);
24221 +
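A typical call site dumps a request's scatterlist under DEBUG builds, mirroring how print_hex_dump() is used elsewhere in the driver. A sketch (`req` is a hypothetical crypto request; the prefix string and row/group sizes are just the driver's usual choices):

	#ifdef DEBUG
		caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
			     req->cryptlen, 1);
	#endif
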
24222  static const struct {
24223         u8 value;
24224         const char *error_text;
24225 @@ -69,6 +112,54 @@ static const struct {
24226         { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
24227  };
24228  
24229 +static const struct {
24230 +       u8 value;
24231 +       const char *error_text;
24232 +} qi_error_list[] = {
24233 +       { 0x1F, "Job terminated by FQ or ICID flush" },
24234 +       { 0x20, "FD format error"},
24235 +       { 0x21, "FD command format error"},
24236 +       { 0x23, "FL format error"},
24237 +       { 0x25, "CRJD specified in FD, but not enabled in FLC"},
24238 +       { 0x30, "Max. buffer size too small"},
24239 +       { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
24240 +       { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
24241 +       { 0x33, "Size over/underflow (allocate mode)"},
24242 +       { 0x34, "Size over/underflow (reuse mode)"},
24243 +       { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
24244 +       { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
24245 +       { 0x41, "SBC frame format not supported (allocate mode)"},
24246 +       { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
24247 +       { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
24248 +       { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
24249 +       { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
24250 +       { 0x46, "Annotation length exceeds offset (reuse mode)"},
24251 +       { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
24252 +       { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
24253 +       { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
24254 +       { 0x51, "Unsupported IF reuse mode"},
24255 +       { 0x52, "Unsupported FL use mode"},
24256 +       { 0x53, "Unsupported RJD use mode"},
24257 +       { 0x54, "Unsupported inline descriptor use mode"},
24258 +       { 0xC0, "Table buffer pool 0 depletion"},
24259 +       { 0xC1, "Table buffer pool 1 depletion"},
24260 +       { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
24261 +       { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
24262 +       { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
24263 +       { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
24264 +       { 0xD0, "FLC read error"},
24265 +       { 0xD1, "FL read error"},
24266 +       { 0xD2, "FL write error"},
24267 +       { 0xD3, "OF SGT write error"},
24268 +       { 0xD4, "PTA read error"},
24269 +       { 0xD5, "PTA write error"},
24270 +       { 0xD6, "OF SGT F-bit write error"},
24271 +       { 0xD7, "ASA write error"},
24272 +       { 0xE1, "FLC[ICR]=0 ICID error"},
24273 +       { 0xE2, "FLC[ICR]=1 ICID error"},
24274 +       { 0xE4, "Source of ICID flush not trusted (BDI = 0)"},
24275 +};
24276 +
24277  static const char * const cha_id_list[] = {
24278         "",
24279         "AES",
24280 @@ -146,10 +237,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
24281             strlen(rng_err_id_list[err_id])) {
24282                 /* RNG-only error */
24283                 err_str = rng_err_id_list[err_id];
24284 -       } else if (err_id < ARRAY_SIZE(err_id_list))
24285 +       } else {
24286                 err_str = err_id_list[err_id];
24287 -       else
24288 -               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24289 +       }
24290  
24291         /*
24292          * CCB ICV check failures are part of normal operation life;
24293 @@ -198,6 +288,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
24294                 status, error, idx_str, idx, err_str, err_err_code);
24295  }
24296  
24297 +static void report_qi_status(struct device *qidev, const u32 status,
24298 +                            const char *error)
24299 +{
24300 +       u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
24301 +       const char *err_str = "unidentified error value 0x";
24302 +       char err_err_code[3] = { 0 };
24303 +       int i;
24304 +
24305 +       for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
24306 +               if (qi_error_list[i].value == err_id)
24307 +                       break;
24308 +
24309 +       if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
24310 +               err_str = qi_error_list[i].error_text;
24311 +       else
24312 +               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24313 +
24314 +       dev_err(qidev, "%08x: %s: %s%s\n",
24315 +               status, error, err_str, err_err_code);
24316 +}
24317 +
24318  static void report_jr_status(struct device *jrdev, const u32 status,
24319                              const char *error)
24320  {
24321 @@ -212,7 +323,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
24322                 status, error, __func__);
24323  }
24324  
24325 -void caam_jr_strstatus(struct device *jrdev, u32 status)
24326 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
24327  {
24328         static const struct stat_src {
24329                 void (*report_ssed)(struct device *jrdev, const u32 status,
24330 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
24331                 { report_ccb_status, "CCB" },
24332                 { report_jump_status, "Jump" },
24333                 { report_deco_status, "DECO" },
24334 -               { NULL, "Queue Manager Interface" },
24335 +               { report_qi_status, "Queue Manager Interface" },
24336                 { report_jr_status, "Job Ring" },
24337                 { report_cond_code_status, "Condition Code" },
24338                 { NULL, NULL },
24339 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
24340         else
24341                 dev_err(jrdev, "%d: unknown error source\n", ssrc);
24342  }
24343 -EXPORT_SYMBOL(caam_jr_strstatus);
24344 +EXPORT_SYMBOL(caam_strstatus);
24345 diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
24346 index b6350b0d..751ddcac 100644
24347 --- a/drivers/crypto/caam/error.h
24348 +++ b/drivers/crypto/caam/error.h
24349 @@ -7,5 +7,13 @@
24350  #ifndef CAAM_ERROR_H
24351  #define CAAM_ERROR_H
24352  #define CAAM_ERROR_STR_MAX 302
24353 -void caam_jr_strstatus(struct device *jrdev, u32 status);
24354 +
24355 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
24356 +
24357 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
24358 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
24359 +
24360 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24361 +                 int rowsize, int groupsize, struct scatterlist *sg,
24362 +                 size_t tlen, bool ascii);
24363  #endif /* CAAM_ERROR_H */
24364 diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
24365 index 5d4c0507..a5236125 100644
24366 --- a/drivers/crypto/caam/intern.h
24367 +++ b/drivers/crypto/caam/intern.h
24368 @@ -41,6 +41,7 @@ struct caam_drv_private_jr {
24369         struct device           *dev;
24370         int ridx;
24371         struct caam_job_ring __iomem *rregs;    /* JobR's register space */
24372 +       struct tasklet_struct irqtask;
24373         int irq;                        /* One per queue */
24374  
24375         /* Number of scatterlist crypt transforms active on the JobR */
24376 @@ -63,10 +64,9 @@ struct caam_drv_private_jr {
24377   * Driver-private storage for a single CAAM block instance
24378   */
24379  struct caam_drv_private {
24380 -
24381 -       struct device *dev;
24382 -       struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
24383 -       struct platform_device *pdev;
24384 +#ifdef CONFIG_CAAM_QI
24385 +       struct device *qidev;
24386 +#endif
24387  
24388         /* Physical-presence section */
24389         struct caam_ctrl __iomem *ctrl; /* controller region */
24390 @@ -102,11 +102,6 @@ struct caam_drv_private {
24391  #ifdef CONFIG_DEBUG_FS
24392         struct dentry *dfs_root;
24393         struct dentry *ctl; /* controller dir */
24394 -       struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
24395 -       struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
24396 -       struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
24397 -       struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
24398 -
24399         struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
24400         struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
24401  #endif
24402 @@ -114,4 +109,22 @@ struct caam_drv_private {
24403  
24404  void caam_jr_algapi_init(struct device *dev);
24405  void caam_jr_algapi_remove(struct device *dev);
24406 +
24407 +#ifdef CONFIG_DEBUG_FS
24408 +static int caam_debugfs_u64_get(void *data, u64 *val)
24409 +{
24410 +       *val = caam64_to_cpu(*(u64 *)data);
24411 +       return 0;
24412 +}
24413 +
24414 +static int caam_debugfs_u32_get(void *data, u64 *val)
24415 +{
24416 +       *val = caam32_to_cpu(*(u32 *)data);
24417 +       return 0;
24418 +}
24419 +
24420 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24421 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24422 +#endif
24423 +
24424  #endif /* INTERN_H */
24425 diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
24426 index 757c27f9..00e87094 100644
24427 --- a/drivers/crypto/caam/jr.c
24428 +++ b/drivers/crypto/caam/jr.c
24429 @@ -9,6 +9,7 @@
24430  #include <linux/of_address.h>
24431  
24432  #include "compat.h"
24433 +#include "ctrl.h"
24434  #include "regs.h"
24435  #include "jr.h"
24436  #include "desc.h"
24437 @@ -22,6 +23,14 @@ struct jr_driver_data {
24438  
24439  static struct jr_driver_data driver_data;
24440  
24441 +static int jr_driver_probed;
24442 +
24443 +int caam_jr_driver_probed(void)
24444 +{
24445 +       return jr_driver_probed;
24446 +}
24447 +EXPORT_SYMBOL(caam_jr_driver_probed);
24448 +
24449  static int caam_reset_hw_jr(struct device *dev)
24450  {
24451         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24452 @@ -73,6 +82,8 @@ static int caam_jr_shutdown(struct device *dev)
24453  
24454         ret = caam_reset_hw_jr(dev);
24455  
24456 +       tasklet_kill(&jrp->irqtask);
24457 +
24458         /* Release interrupt */
24459         free_irq(jrp->irq, dev);
24460  
24461 @@ -116,6 +127,8 @@ static int caam_jr_remove(struct platform_device *pdev)
24462                 dev_err(jrdev, "Failed to shut down job ring\n");
24463         irq_dispose_mapping(jrpriv->irq);
24464  
24465 +       jr_driver_probed--;
24466 +
24467         return ret;
24468  }
24469  
24470 @@ -128,7 +141,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
24471  
24472         /*
24473          * Check the output ring for ready responses, kick
24474 -        * the threaded irq if jobs done.
24475 +        * tasklet if jobs done.
24476          */
24477         irqstate = rd_reg32(&jrp->rregs->jrintstatus);
24478         if (!irqstate)
24479 @@ -150,13 +163,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
24480         /* Have valid interrupt at this point, just ACK and trigger */
24481         wr_reg32(&jrp->rregs->jrintstatus, irqstate);
24482  
24483 -       return IRQ_WAKE_THREAD;
24484 +       preempt_disable();
24485 +       tasklet_schedule(&jrp->irqtask);
24486 +       preempt_enable();
24487 +
24488 +       return IRQ_HANDLED;
24489  }
24490  
24491 -static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24492 +/* Deferred service handler, run as interrupt-fired tasklet */
24493 +static void caam_jr_dequeue(unsigned long devarg)
24494  {
24495         int hw_idx, sw_idx, i, head, tail;
24496 -       struct device *dev = st_dev;
24497 +       struct device *dev = (struct device *)devarg;
24498         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24499         void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
24500         u32 *userdesc, userstatus;
24501 @@ -230,8 +248,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24502  
24503         /* reenable / unmask IRQs */
24504         clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
24505 -
24506 -       return IRQ_HANDLED;
24507  }
24508  
24509  /**
24510 @@ -274,6 +290,36 @@ struct device *caam_jr_alloc(void)
24511  }
24512  EXPORT_SYMBOL(caam_jr_alloc);
24513  
24514 +/**
24515 + * caam_jridx_alloc() - Allocate a specific job ring based on its index.
24516 + *
24517 + * returns: pointer to the job ring device for the requested index,
24518 + *          or ERR_PTR(-ENODEV) if no such ring is registered.
24519 + */
24520 +struct device *caam_jridx_alloc(int idx)
24521 +{
24522 +       struct caam_drv_private_jr *jrpriv;
24523 +       struct device *dev = ERR_PTR(-ENODEV);
24524 +
24525 +       spin_lock(&driver_data.jr_alloc_lock);
24526 +
24527 +       if (list_empty(&driver_data.jr_list))
24528 +               goto end;
24529 +
24530 +       list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
24531 +               if (jrpriv->ridx == idx) {
24532 +                       atomic_inc(&jrpriv->tfm_count);
24533 +                       dev = jrpriv->dev;
24534 +                       break;
24535 +               }
24536 +       }
24537 +
24538 +end:
24539 +       spin_unlock(&driver_data.jr_alloc_lock);
24540 +       return dev;
24541 +}
24542 +EXPORT_SYMBOL(caam_jridx_alloc);
24543 +
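Usage mirrors caam_jr_alloc(), except the caller pins a particular ring. A sketch (ring index 0 chosen arbitrarily; error handling abbreviated):

	struct device *jrdev = caam_jridx_alloc(0);

	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);

	/* ... submit descriptors with caam_jr_enqueue(jrdev, ...) ... */

	caam_jr_free(jrdev);
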
24544  /**
24545   * caam_jr_free() - Free the Job Ring
24546   * @rdev     - points to the dev that identifies the Job ring to
24547 @@ -389,10 +435,11 @@ static int caam_jr_init(struct device *dev)
24548  
24549         jrp = dev_get_drvdata(dev);
24550  
24551 +       tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
24552 +
24553         /* Connect job ring interrupt handler. */
24554 -       error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
24555 -                                    caam_jr_threadirq, IRQF_SHARED,
24556 -                                    dev_name(dev), dev);
24557 +       error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
24558 +                           dev_name(dev), dev);
24559         if (error) {
24560                 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
24561                         jrp->ridx, jrp->irq);
24562 @@ -454,6 +501,7 @@ static int caam_jr_init(struct device *dev)
24563  out_free_irq:
24564         free_irq(jrp->irq, dev);
24565  out_kill_deq:
24566 +       tasklet_kill(&jrp->irqtask);
24567         return error;
24568  }
24569  
24570 @@ -489,15 +537,28 @@ static int caam_jr_probe(struct platform_device *pdev)
24571                 return -ENOMEM;
24572         }
24573  
24574 -       jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
24575 +       jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
24576  
24577 -       if (sizeof(dma_addr_t) == sizeof(u64))
24578 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
24579 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
24580 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
24581 +               if (caam_dpaa2)
24582 +                       error = dma_set_mask_and_coherent(jrdev,
24583 +                                                         DMA_BIT_MASK(49));
24584 +               else if (of_device_is_compatible(nprop,
24585 +                                                "fsl,sec-v5.0-job-ring"))
24586 +                       error = dma_set_mask_and_coherent(jrdev,
24587 +                                                         DMA_BIT_MASK(40));
24588                 else
24589 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
24590 -       else
24591 -               dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24592 +                       error = dma_set_mask_and_coherent(jrdev,
24593 +                                                         DMA_BIT_MASK(36));
24594 +       } else {
24595 +               error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24596 +       }
24597 +       if (error) {
24598 +               dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
24599 +                       error);
24600 +               iounmap(ctrl);
24601 +               return error;
24602 +       }
24603  
24604         /* Identify the interrupt */
24605         jrpriv->irq = irq_of_parse_and_map(nprop, 0);
24606 @@ -517,10 +578,12 @@ static int caam_jr_probe(struct platform_device *pdev)
24607  
24608         atomic_set(&jrpriv->tfm_count, 0);
24609  
24610 +       jr_driver_probed++;
24611 +
24612         return 0;
24613  }
24614  
24615 -static struct of_device_id caam_jr_match[] = {
24616 +static const struct of_device_id caam_jr_match[] = {
24617         {
24618                 .compatible = "fsl,sec-v4.0-job-ring",
24619         },
24620 diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
24621 index 97113a6d..ee4d31c9 100644
24622 --- a/drivers/crypto/caam/jr.h
24623 +++ b/drivers/crypto/caam/jr.h
24624 @@ -8,7 +8,9 @@
24625  #define JR_H
24626  
24627  /* Prototypes for backend-level services exposed to APIs */
24628 +int caam_jr_driver_probed(void);
24629  struct device *caam_jr_alloc(void);
24630 +struct device *caam_jridx_alloc(int idx);
24631  void caam_jr_free(struct device *rdev);
24632  int caam_jr_enqueue(struct device *dev, u32 *desc,
24633                     void (*cbk)(struct device *dev, u32 *desc, u32 status,
24634 diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
24635 index 3ce1d5cd..a523ed77 100644
24636 --- a/drivers/crypto/caam/key_gen.c
24637 +++ b/drivers/crypto/caam/key_gen.c
24638 @@ -41,15 +41,29 @@ Split key generation-----------------------------------------------
24639  [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
24640                         @0xffe04000
24641  */
24642 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24643 -                 int split_key_pad_len, const u8 *key_in, u32 keylen,
24644 -                 u32 alg_op)
24645 +int gen_split_key(struct device *jrdev, u8 *key_out,
24646 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
24647 +                 int max_keylen)
24648  {
24649         u32 *desc;
24650         struct split_key_result result;
24651         dma_addr_t dma_addr_in, dma_addr_out;
24652         int ret = -ENOMEM;
24653  
24654 +       adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
24655 +       adata->keylen_pad = split_key_pad_len(adata->algtype &
24656 +                                             OP_ALG_ALGSEL_MASK);
24657 +
24658 +#ifdef DEBUG
24659 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
24660 +               adata->keylen, adata->keylen_pad);
24661 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
24662 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
24663 +#endif
24664 +
24665 +       if (adata->keylen_pad > max_keylen)
24666 +               return -EINVAL;
24667 +
24668         desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
24669         if (!desc) {
24670                 dev_err(jrdev, "unable to allocate key input memory\n");
24671 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24672                 goto out_free;
24673         }
24674  
24675 -       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
24676 +       dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
24677                                       DMA_FROM_DEVICE);
24678         if (dma_mapping_error(jrdev, dma_addr_out)) {
24679                 dev_err(jrdev, "unable to map key output memory\n");
24680 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24681         append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
24682  
24683         /* Sets MDHA up into an HMAC-INIT */
24684 -       append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
24685 +       append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
24686 +                        OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
24687 +                        OP_ALG_AS_INIT);
24688  
24689         /*
24690          * do a FIFO_LOAD of zero, this will trigger the internal key expansion
24691 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24692          * FIFO_STORE with the explicit split-key content store
24693          * (0x26 output type)
24694          */
24695 -       append_fifo_store(desc, dma_addr_out, split_key_len,
24696 +       append_fifo_store(desc, dma_addr_out, adata->keylen,
24697                           LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
24698  
24699  #ifdef DEBUG
24700 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24701  #ifdef DEBUG
24702                 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
24703                                DUMP_PREFIX_ADDRESS, 16, 4, key_out,
24704 -                              split_key_pad_len, 1);
24705 +                              adata->keylen_pad, 1);
24706  #endif
24707         }
24708  
24709 -       dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
24710 +       dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
24711                          DMA_FROM_DEVICE);
24712  out_unmap_in:
24713         dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
24714 diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
24715 index c5588f6d..851a7c86 100644
24716 --- a/drivers/crypto/caam/key_gen.h
24717 +++ b/drivers/crypto/caam/key_gen.h
24718 @@ -5,6 +5,36 @@
24719   *
24720   */
24721  
24722 +/**
24723 + * split_key_len - Compute MDHA split key length for a given algorithm
24724 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24725 + *        SHA224, SHA256, SHA384, SHA512.
24726 + *
24727 + * Return: MDHA split key length
24728 + */
24729 +static inline u32 split_key_len(u32 hash)
24730 +{
24731 +       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
24732 +       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
24733 +       u32 idx;
24734 +
24735 +       idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
24736 +
24737 +       return (u32)(mdpadlen[idx] * 2);
24738 +}
24739 +
24740 +/**
24741 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
24742 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24743 + *        SHA224, SHA256, SHA384, SHA512.
24744 + *
24745 + * Return: MDHA split key pad length
24746 + */
24747 +static inline u32 split_key_pad_len(u32 hash)
24748 +{
24749 +       return ALIGN(split_key_len(hash), 16);
24750 +}
24751 +
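+/*
+ * Worked example (a sketch based on mdpadlen[] above): for SHA-256 the
+ * selector indexes entry 3, so split_key_len() = 32 * 2 = 64 bytes and
+ * split_key_pad_len() = ALIGN(64, 16) = 64. For MD5 the split key is
+ * 16 * 2 = 32 bytes, already 16-byte aligned, so the padded length is 32 too.
+ */
+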
24752  struct split_key_result {
24753         struct completion completion;
24754         int err;
24755 @@ -12,6 +42,6 @@ struct split_key_result {
24756  
24757  void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
24758  
24759 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24760 -                   int split_key_pad_len, const u8 *key_in, u32 keylen,
24761 -                   u32 alg_op);
24762 +int gen_split_key(struct device *jrdev, u8 *key_out,
24763 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
24764 +                 int max_keylen);
24765 diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
24766 index aaa00dd1..31e59963 100644
24767 --- a/drivers/crypto/caam/pdb.h
24768 +++ b/drivers/crypto/caam/pdb.h
24769 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
24770  #define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
24771  #define RSA_PDB_D_SHIFT         12
24772  #define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
24773 +#define RSA_PDB_Q_SHIFT         12
24774 +#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
24775  
24776  #define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
24777  #define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
24778 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
24779  #define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
24780  
24781  #define RSA_PRIV_KEY_FRM_1      0
24782 +#define RSA_PRIV_KEY_FRM_2      1
24783 +#define RSA_PRIV_KEY_FRM_3      2
24784  
24785  /**
24786   * RSA Encrypt Protocol Data Block
24787 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
24788         dma_addr_t      d_dma;
24789  } __packed;
24790  
24791 +/**
24792 + * RSA Decrypt PDB - Private Key Form #2
24793 + * @sgf     : scatter-gather field
24794 + * @g_dma   : dma address of encrypted input data
24795 + * @f_dma   : dma address of output data
24796 + * @d_dma   : dma address of RSA private exponent
24797 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
24798 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
24799 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24800 + *            as internal state buffer. It is assumed to be as long as p.
24801 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24802 + *            as internal state buffer. It is assumed to be as long as q.
24803 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24804 + */
24805 +struct rsa_priv_f2_pdb {
24806 +       u32             sgf;
24807 +       dma_addr_t      g_dma;
24808 +       dma_addr_t      f_dma;
24809 +       dma_addr_t      d_dma;
24810 +       dma_addr_t      p_dma;
24811 +       dma_addr_t      q_dma;
24812 +       dma_addr_t      tmp1_dma;
24813 +       dma_addr_t      tmp2_dma;
24814 +       u32             p_q_len;
24815 +} __packed;
24816 +
24817 +/**
24818 + * RSA Decrypt PDB - Private Key Form #3
24819 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
24820 + * the RSA modulus.
24821 + * @sgf     : scatter-gather field
24822 + * @g_dma   : dma address of encrypted input data
24823 + * @f_dma   : dma address of output data
24824 + * @c_dma   : dma address of RSA CRT coefficient
24825 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
24826 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
24827 + * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
24828 + * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
24829 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24830 + *            as internal state buffer. It is assumed to be as long as p.
24831 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24832 + *            as internal state buffer. It is assumed to be as long as q.
24833 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24834 + */
24835 +struct rsa_priv_f3_pdb {
24836 +       u32             sgf;
24837 +       dma_addr_t      g_dma;
24838 +       dma_addr_t      f_dma;
24839 +       dma_addr_t      c_dma;
24840 +       dma_addr_t      p_dma;
24841 +       dma_addr_t      q_dma;
24842 +       dma_addr_t      dp_dma;
24843 +       dma_addr_t      dq_dma;
24844 +       dma_addr_t      tmp1_dma;
24845 +       dma_addr_t      tmp2_dma;
24846 +       u32             p_q_len;
24847 +} __packed;
24848 +
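+/*
+ * For reference (standard RSA-CRT, a sketch of the math): with g the
+ * encrypted input and c the CRT coefficient q^(-1) mod p, the private
+ * operation computes m1 = g^dp mod p, m2 = g^dq mod q,
+ * h = c * (m1 - m2) mod p and finally f = m2 + h * q.
+ */
+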
24849  #endif
24850 diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
24851 index 4e4183e6..9e2ce6fe 100644
24852 --- a/drivers/crypto/caam/pkc_desc.c
24853 +++ b/drivers/crypto/caam/pkc_desc.c
24854 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
24855         append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24856                          RSA_PRIV_KEY_FRM_1);
24857  }
24858 +
24859 +/* Descriptor for RSA Private operation - Private Key Form #2 */
24860 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
24861 +{
24862 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
24863 +       append_cmd(desc, pdb->sgf);
24864 +       append_ptr(desc, pdb->g_dma);
24865 +       append_ptr(desc, pdb->f_dma);
24866 +       append_ptr(desc, pdb->d_dma);
24867 +       append_ptr(desc, pdb->p_dma);
24868 +       append_ptr(desc, pdb->q_dma);
24869 +       append_ptr(desc, pdb->tmp1_dma);
24870 +       append_ptr(desc, pdb->tmp2_dma);
24871 +       append_cmd(desc, pdb->p_q_len);
24872 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24873 +                        RSA_PRIV_KEY_FRM_2);
24874 +}
24875 +
24876 +/* Descriptor for RSA Private operation - Private Key Form #3 */
24877 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
24878 +{
24879 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
24880 +       append_cmd(desc, pdb->sgf);
24881 +       append_ptr(desc, pdb->g_dma);
24882 +       append_ptr(desc, pdb->f_dma);
24883 +       append_ptr(desc, pdb->c_dma);
24884 +       append_ptr(desc, pdb->p_dma);
24885 +       append_ptr(desc, pdb->q_dma);
24886 +       append_ptr(desc, pdb->dp_dma);
24887 +       append_ptr(desc, pdb->dq_dma);
24888 +       append_ptr(desc, pdb->tmp1_dma);
24889 +       append_ptr(desc, pdb->tmp2_dma);
24890 +       append_cmd(desc, pdb->p_q_len);
24891 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24892 +                        RSA_PRIV_KEY_FRM_3);
24893 +}
24894 diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
24895 new file mode 100644
24896 index 00000000..48185d55
24897 --- /dev/null
24898 +++ b/drivers/crypto/caam/qi.c
24899 @@ -0,0 +1,797 @@
24900 +/*
24901 + * CAAM/SEC 4.x QI transport/backend driver
24902 + * Queue Interface backend functionality
24903 + *
24904 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
24905 + * Copyright 2016-2017 NXP
24906 + */
24907 +
24908 +#include <linux/cpumask.h>
24909 +#include <linux/kthread.h>
24910 +#include <linux/fsl_qman.h>
24911 +
24912 +#include "regs.h"
24913 +#include "qi.h"
24914 +#include "desc.h"
24915 +#include "intern.h"
24916 +#include "desc_constr.h"
24917 +
24918 +#define PREHDR_RSLS_SHIFT      31
24919 +
24920 +/*
24921 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
24922 + * so that resources used by the in-flight buffers do not become a memory hog.
24923 + */
24924 +#define MAX_RSP_FQ_BACKLOG_PER_CPU     256
24925 +
24926 +#define CAAM_QI_ENQUEUE_RETRIES        10000
24927 +
24928 +#define CAAM_NAPI_WEIGHT       63
24929 +
24930 +/*
24931 + * caam_napi - struct holding CAAM NAPI-related params
24932 + * @irqtask: IRQ task for QI backend
24933 + * @p: QMan portal
24934 + */
24935 +struct caam_napi {
24936 +       struct napi_struct irqtask;
24937 +       struct qman_portal *p;
24938 +};
24939 +
24940 +/*
24941 + * caam_qi_pcpu_priv - percpu private data structure to maintain list of pending
24942 + *                     responses expected on each cpu.
24943 + * @caam_napi: CAAM NAPI params
24944 + * @net_dev: netdev used by NAPI
24945 + * @rsp_fq: response FQ from CAAM
24946 + */
24947 +struct caam_qi_pcpu_priv {
24948 +       struct caam_napi caam_napi;
24949 +       struct net_device net_dev;
24950 +       struct qman_fq *rsp_fq;
24951 +} ____cacheline_aligned;
24952 +
24953 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
24954 +static DEFINE_PER_CPU(int, last_cpu);
24955 +
24956 +/*
24957 + * caam_qi_priv - CAAM QI backend private params
24958 + * @cgr: QMan congestion group
24959 + * @qi_pdev: platform device for QI backend
24960 + */
24961 +struct caam_qi_priv {
24962 +       struct qman_cgr cgr;
24963 +       struct platform_device *qi_pdev;
24964 +};
24965 +
24966 +static struct caam_qi_priv qipriv ____cacheline_aligned;
24967 +
24968 +/*
24969 + * This is written by only one core - the one that initialized the CGR - and
24970 + * read by multiple cores (all the others).
24971 + */
24972 +bool caam_congested __read_mostly;
24973 +EXPORT_SYMBOL(caam_congested);
24974 +
24975 +#ifdef CONFIG_DEBUG_FS
24976 +/*
24977 + * This is a counter for the number of times the congestion group (where all
24978 + * the request and response queues are) reached congestion. Incremented
24979 + * each time the congestion callback is called with congested == true.
24980 + */
24981 +static u64 times_congested;
24982 +#endif
24983 +
24984 +/*
24985 + * CPU on which the module was initialised. This is required because the QMan
24986 + * driver requires CGRs to be removed from the same CPU on which they were
24987 + * originally allocated.
24988 + */
24989 +static int mod_init_cpu;
24990 +
24991 +/*
24992 + * This is a cache of buffers, from which the users of the CAAM QI driver
24993 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
24994 + * doing malloc on the hotpath.
24995 + * NOTE: A more elegant solution would be to have some headroom in the frames
24996 + *       being processed. This could be added by the dpaa-ethernet driver.
24997 + *       This would pose a problem for userspace application processing which
24998 + *       cannot know of this limitation. So for now, this will work.
24999 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
25000 + */
25001 +static struct kmem_cache *qi_cache;
25002 +
25003 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
25004 +{
25005 +       struct qm_fd fd;
25006 +       int ret;
25007 +       int num_retries = 0;
25008 +
25009 +       fd.cmd = 0;
25010 +       fd.format = qm_fd_compound;
25011 +       fd.cong_weight = req->fd_sgt[1].length;
25012 +       fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
25013 +                             DMA_BIDIRECTIONAL);
25014 +       if (dma_mapping_error(qidev, fd.addr)) {
25015 +               dev_err(qidev, "DMA mapping error for QI enqueue request\n");
25016 +               return -EIO;
25017 +       }
25018 +
25019 +       do {
25020 +               ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
25021 +               if (likely(!ret))
25022 +                       return 0;
25023 +
25024 +               if (ret != -EBUSY)
25025 +                       break;
25026 +               num_retries++;
25027 +       } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
25028 +
25029 +       dev_err(qidev, "qman_enqueue failed: %d\n", ret);
25030 +
25031 +       return ret;
25032 +}
25033 +EXPORT_SYMBOL(caam_qi_enqueue);
25034 +
25035 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
25036 +                          const struct qm_mr_entry *msg)
25037 +{
25038 +       const struct qm_fd *fd;
25039 +       struct caam_drv_req *drv_req;
25040 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25041 +
25042 +       fd = &msg->ern.fd;
25043 +
25044 +       if (fd->format != qm_fd_compound) {
25045 +               dev_err(qidev, "Non-compound FD from CAAM\n");
25046 +               return;
25047 +       }
25048 +
25049 +       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
25050 +       if (!drv_req) {
25051 +               dev_err(qidev,
25052 +                       "Can't find original request for CAAM response\n");
25053 +               return;
25054 +       }
25055 +
25056 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25057 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25058 +
25059 +       drv_req->cbk(drv_req, -EIO);
25060 +}
25061 +
25062 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
25063 +                                         struct qman_fq *rsp_fq,
25064 +                                         dma_addr_t hwdesc,
25065 +                                         int fq_sched_flag)
25066 +{
25067 +       int ret;
25068 +       struct qman_fq *req_fq;
25069 +       struct qm_mcc_initfq opts;
25070 +
25071 +       req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
25072 +       if (!req_fq)
25073 +               return ERR_PTR(-ENOMEM);
25074 +
25075 +       req_fq->cb.ern = caam_fq_ern_cb;
25076 +       req_fq->cb.fqs = NULL;
25077 +
25078 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
25079 +                               QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
25080 +                            req_fq);
25081 +       if (ret) {
25082 +               dev_err(qidev, "Failed to create session req FQ\n");
25083 +               goto create_req_fq_fail;
25084 +       }
25085 +
25086 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25087 +                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25088 +                      QM_INITFQ_WE_CGID;
25089 +       opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
25090 +       opts.fqd.dest.channel = qm_channel_caam;
25091 +       opts.fqd.dest.wq = 2;
25092 +       opts.fqd.context_b = qman_fq_fqid(rsp_fq);
25093 +       opts.fqd.context_a.hi = upper_32_bits(hwdesc);
25094 +       opts.fqd.context_a.lo = lower_32_bits(hwdesc);
25095 +       opts.fqd.cgid = qipriv.cgr.cgrid;
25096 +
25097 +       ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
25098 +       if (ret) {
25099 +               dev_err(qidev, "Failed to init session req FQ\n");
25100 +               goto init_req_fq_fail;
25101 +       }
25102 +
25103 +       dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
25104 +               smp_processor_id());
25105 +       return req_fq;
25106 +
25107 +init_req_fq_fail:
25108 +       qman_destroy_fq(req_fq, 0);
25109 +create_req_fq_fail:
25110 +       kfree(req_fq);
25111 +       return ERR_PTR(ret);
25112 +}
25113 +
25114 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
25115 +{
25116 +       int ret;
25117 +
25118 +       ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
25119 +                                   QMAN_VOLATILE_FLAG_FINISH,
25120 +                                   QM_VDQCR_PRECEDENCE_VDQCR |
25121 +                                   QM_VDQCR_NUMFRAMES_TILLEMPTY);
25122 +       if (ret) {
25123 +               dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
25124 +               return ret;
25125 +       }
25126 +
25127 +       do {
25128 +               struct qman_portal *p;
25129 +
25130 +               p = qman_get_affine_portal(smp_processor_id());
25131 +               qman_p_poll_dqrr(p, 16);
25132 +       } while (fq->flags & QMAN_FQ_STATE_NE);
25133 +
25134 +       return 0;
25135 +}
25136 +
25137 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
25138 +{
25139 +       u32 flags;
25140 +       int ret;
25141 +
25142 +       ret = qman_retire_fq(fq, &flags);
25143 +       if (ret < 0) {
25144 +               dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
25145 +               return ret;
25146 +       }
25147 +
25148 +       if (!ret)
25149 +               goto empty_fq;
25150 +
25151 +       /* Async FQ retirement condition */
25152 +       if (ret == 1) {
25153 +               /* Retry till FQ gets in retired state */
25154 +               do {
25155 +                       msleep(20);
25156 +               } while (fq->state != qman_fq_state_retired);
25157 +
25158 +               WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
25159 +               WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
25160 +       }
25161 +
25162 +empty_fq:
25163 +       if (fq->flags & QMAN_FQ_STATE_NE) {
25164 +               ret = empty_retired_fq(qidev, fq);
25165 +               if (ret) {
25166 +                       dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
25167 +                               fq->fqid);
25168 +                       return ret;
25169 +               }
25170 +       }
25171 +
25172 +       ret = qman_oos_fq(fq);
25173 +       if (ret)
25174 +               dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
25175 +
25176 +       qman_destroy_fq(fq, 0);
25177 +       kfree(fq);
25178 +
25179 +       return ret;
25180 +}
25181 +
25182 +static int empty_caam_fq(struct qman_fq *fq)
25183 +{
25184 +       int ret;
25185 +       struct qm_mcr_queryfq_np np;
25186 +
25187 +       /* Wait till the older CAAM FQ gets empty */
25188 +       do {
25189 +               ret = qman_query_fq_np(fq, &np);
25190 +               if (ret)
25191 +                       return ret;
25192 +
25193 +               if (!np.frm_cnt)
25194 +                       break;
25195 +
25196 +               msleep(20);
25197 +       } while (1);
25198 +
25199 +       /*
25200 +        * Give extra time for pending jobs from this FQ in holding tanks
25201 +        * to get processed
25202 +        */
25203 +       msleep(20);
25204 +       return 0;
25205 +}
25206 +
25207 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
25208 +{
25209 +       int ret;
25210 +       u32 num_words;
25211 +       struct qman_fq *new_fq, *old_fq;
25212 +       struct device *qidev = drv_ctx->qidev;
25213 +
25214 +       num_words = desc_len(sh_desc);
25215 +       if (num_words > MAX_SDLEN) {
25216 +               dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
25217 +               return -EINVAL;
25218 +       }
25219 +
25220 +       /* Note down older req FQ */
25221 +       old_fq = drv_ctx->req_fq;
25222 +
25223 +       /* Create a new req FQ in parked state */
25224 +       new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
25225 +                                   drv_ctx->context_a, 0);
25226 +       if (unlikely(IS_ERR_OR_NULL(new_fq))) {
25227 +               dev_err(qidev, "FQ allocation for shdesc update failed\n");
25228 +               return PTR_ERR(new_fq);
25229 +       }
25230 +
25231 +       /* Hook up new FQ to context so that new requests keep queuing */
25232 +       drv_ctx->req_fq = new_fq;
25233 +
25234 +       /* Empty and remove the older FQ */
25235 +       ret = empty_caam_fq(old_fq);
25236 +       if (ret) {
25237 +               dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
25238 +
25239 +               /* We can revert to older FQ */
25240 +               drv_ctx->req_fq = old_fq;
25241 +
25242 +               if (kill_fq(qidev, new_fq))
25243 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
25244 +
25245 +               return ret;
25246 +       }
25247 +
25248 +       /*
25249 +        * Re-initialise pre-header. Set RSLS and SDLEN.
25250 +        * Update the shared descriptor for driver context.
25251 +        */
25252 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25253 +                                          num_words);
25254 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25255 +       dma_sync_single_for_device(qidev, drv_ctx->context_a,
25256 +                                  sizeof(drv_ctx->sh_desc) +
25257 +                                  sizeof(drv_ctx->prehdr),
25258 +                                  DMA_BIDIRECTIONAL);
25259 +
25260 +       /* Put the new FQ in scheduled state */
25261 +       ret = qman_schedule_fq(new_fq);
25262 +       if (ret) {
25263 +               dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
25264 +
25265 +               /*
25266 +                * We can kill new FQ and revert to old FQ.
25267 +                * Since the desc is already modified, treat this as a success case.
25268 +                */
25269 +
25270 +               drv_ctx->req_fq = old_fq;
25271 +
25272 +               if (kill_fq(qidev, new_fq))
25273 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
25274 +       } else if (kill_fq(qidev, old_fq)) {
25275 +               dev_warn(qidev, "Old CAAM FQ kill failed\n");
25276 +       }
25277 +
25278 +       return 0;
25279 +}
25280 +EXPORT_SYMBOL(caam_drv_ctx_update);
25281 +
25282 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
25283 +                                      int *cpu,
25284 +                                      u32 *sh_desc)
25285 +{
25286 +       size_t size;
25287 +       u32 num_words;
25288 +       dma_addr_t hwdesc;
25289 +       struct caam_drv_ctx *drv_ctx;
25290 +       const cpumask_t *cpus = qman_affine_cpus();
25291 +
25292 +       num_words = desc_len(sh_desc);
25293 +       if (num_words > MAX_SDLEN) {
25294 +               dev_err(qidev, "Invalid descriptor len: %d words\n",
25295 +                       num_words);
25296 +               return ERR_PTR(-EINVAL);
25297 +       }
25298 +
25299 +       drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
25300 +       if (!drv_ctx)
25301 +               return ERR_PTR(-ENOMEM);
25302 +
25303 +       /*
25304 +        * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
25305 +        * and dma-map them.
25306 +        */
25307 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25308 +                                          num_words);
25309 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25310 +       size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
25311 +       hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
25312 +                               DMA_BIDIRECTIONAL);
25313 +       if (dma_mapping_error(qidev, hwdesc)) {
25314 +               dev_err(qidev, "DMA map error for preheader + shdesc\n");
25315 +               kfree(drv_ctx);
25316 +               return ERR_PTR(-ENOMEM);
25317 +       }
25318 +       drv_ctx->context_a = hwdesc;
25319 +
25320 +       /* If given CPU does not own the portal, choose another one that does */
25321 +       if (!cpumask_test_cpu(*cpu, cpus)) {
25322 +               int *pcpu = &get_cpu_var(last_cpu);
25323 +
25324 +               *pcpu = cpumask_next(*pcpu, cpus);
25325 +               if (*pcpu >= nr_cpu_ids)
25326 +                       *pcpu = cpumask_first(cpus);
25327 +               *cpu = *pcpu;
25328 +
25329 +               put_cpu_var(last_cpu);
25330 +       }
25331 +       drv_ctx->cpu = *cpu;
25332 +
25333 +       /* Find response FQ hooked with this CPU */
25334 +       drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
25335 +
25336 +       /* Attach request FQ */
25337 +       drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
25338 +                                            QMAN_INITFQ_FLAG_SCHED);
25339 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
25340 +               dev_err(qidev, "create_caam_req_fq failed\n");
25341 +               dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
25342 +               kfree(drv_ctx);
25343 +               return ERR_PTR(-ENOMEM);
25344 +       }
25345 +
25346 +       drv_ctx->qidev = qidev;
25347 +       return drv_ctx;
25348 +}
25349 +EXPORT_SYMBOL(caam_drv_ctx_init);
25350 +
25351 +void *qi_cache_alloc(gfp_t flags)
25352 +{
25353 +       return kmem_cache_alloc(qi_cache, flags);
25354 +}
25355 +EXPORT_SYMBOL(qi_cache_alloc);
25356 +
25357 +void qi_cache_free(void *obj)
25358 +{
25359 +       kmem_cache_free(qi_cache, obj);
25360 +}
25361 +EXPORT_SYMBOL(qi_cache_free);
25362 +
25363 +static int caam_qi_poll(struct napi_struct *napi, int budget)
25364 +{
25365 +       struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
25366 +
25367 +       int cleaned = qman_p_poll_dqrr(np->p, budget);
25368 +
25369 +       if (cleaned < budget) {
25370 +               napi_complete(napi);
25371 +               qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
25372 +       }
25373 +
25374 +       return cleaned;
25375 +}
25376 +
25377 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
25378 +{
25379 +       if (IS_ERR_OR_NULL(drv_ctx))
25380 +               return;
25381 +
25382 +       /* Remove request FQ */
25383 +       if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
25384 +               dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
25385 +
25386 +       dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
25387 +                        sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
25388 +                        DMA_BIDIRECTIONAL);
25389 +       kfree(drv_ctx);
25390 +}
25391 +EXPORT_SYMBOL(caam_drv_ctx_rel);
25392 +
25393 +int caam_qi_shutdown(struct device *qidev)
25394 +{
25395 +       int i, ret;
25396 +       struct caam_qi_priv *priv = dev_get_drvdata(qidev);
25397 +       const cpumask_t *cpus = qman_affine_cpus();
25398 +       struct cpumask old_cpumask = current->cpus_allowed;
25399 +
25400 +       for_each_cpu(i, cpus) {
25401 +               struct napi_struct *irqtask;
25402 +
25403 +               irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
25404 +               napi_disable(irqtask);
25405 +               netif_napi_del(irqtask);
25406 +
25407 +               if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
25408 +                       dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
25409 +       }
25410 +
25411 +       /*
25412 +        * The QMan driver requires CGRs to be deleted from the same CPU on
25413 +        * which they were instantiated. Hence we make the module removal
25414 +        * execute on the same CPU on which the module was originally inserted.
25415 +        */
25416 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25417 +
25418 +       ret = qman_delete_cgr(&priv->cgr);
25419 +       if (ret)
25420 +               dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
25421 +       else
25422 +               qman_release_cgrid(priv->cgr.cgrid);
25423 +
25424 +       kmem_cache_destroy(qi_cache);
25425 +
25426 +       /* Now that we're done with the CGRs, restore the cpus allowed mask */
25427 +       set_cpus_allowed_ptr(current, &old_cpumask);
25428 +
25429 +       platform_device_unregister(priv->qi_pdev);
25430 +       return ret;
25431 +}
25432 +
25433 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
25434 +{
25435 +       caam_congested = congested;
25436 +
25437 +       if (congested) {
25438 +#ifdef CONFIG_DEBUG_FS
25439 +               times_congested++;
25440 +#endif
25441 +               pr_debug_ratelimited("CAAM entered congestion\n");
25442 +
25443 +       } else {
25444 +               pr_debug_ratelimited("CAAM exited congestion\n");
25445 +       }
25446 +}
25447 +
25448 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
25449 +{
25450 +       /*
25451 +        * In case of threaded ISR, for RT kernels in_irq() does not return an
25452 +        * appropriate value, so use in_serving_softirq() to distinguish
25453 +        * between softirq and irq contexts.
25454 +        */
25455 +       if (unlikely(in_irq() || !in_serving_softirq())) {
25456 +               /* Disable QMan IRQ source and invoke NAPI */
25457 +               qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
25458 +               np->p = p;
25459 +               napi_schedule(&np->irqtask);
25460 +               return 1;
25461 +       }
25462 +       return 0;
25463 +}
25464 +
25465 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
25466 +                                                   struct qman_fq *rsp_fq,
25467 +                                                   const struct qm_dqrr_entry *dqrr)
25468 +{
25469 +       struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
25470 +       struct caam_drv_req *drv_req;
25471 +       const struct qm_fd *fd;
25472 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25473 +
25474 +       if (caam_qi_napi_schedule(p, caam_napi))
25475 +               return qman_cb_dqrr_stop;
25476 +
25477 +       fd = &dqrr->fd;
25478 +       if (unlikely(fd->status))
25479 +               dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
25480 +
25481 +       if (unlikely(fd->format != qm_fd_compound)) {
25482 +               dev_err(qidev, "Non-compound FD from CAAM\n");
25483 +               return qman_cb_dqrr_consume;
25484 +       }
25485 +
25486 +       drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
25487 +       if (unlikely(!drv_req)) {
25488 +               dev_err(qidev,
25489 +                       "Can't find original request for caam response\n");
25490 +               return qman_cb_dqrr_consume;
25491 +       }
25492 +
25493 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25494 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25495 +
25496 +       drv_req->cbk(drv_req, fd->status);
25497 +       return qman_cb_dqrr_consume;
25498 +}
25499 +
25500 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
25501 +{
25502 +       struct qm_mcc_initfq opts;
25503 +       struct qman_fq *fq;
25504 +       int ret;
25505 +
25506 +       fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
25507 +       if (!fq)
25508 +               return -ENOMEM;
25509 +
25510 +       fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
25511 +
25512 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
25513 +                            QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
25514 +       if (ret) {
25515 +               dev_err(qidev, "Rsp FQ create failed\n");
25516 +               kfree(fq);
25517 +               return -ENODEV;
25518 +       }
25519 +
25520 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25521 +               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25522 +               QM_INITFQ_WE_CGID;
25523 +       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
25524 +                          QM_FQCTRL_CGE;
25525 +       opts.fqd.dest.channel = qman_affine_channel(cpu);
25526 +       opts.fqd.dest.wq = 3;
25527 +       opts.fqd.cgid = qipriv.cgr.cgrid;
25528 +       opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
25529 +                                               QM_STASHING_EXCL_DATA;
25530 +       opts.fqd.context_a.stashing.data_cl = 1;
25531 +       opts.fqd.context_a.stashing.context_cl = 1;
25532 +
25533 +       ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
25534 +       if (ret) {
25535 +               dev_err(qidev, "Rsp FQ init failed\n");
25536 +               kfree(fq);
25537 +               return -ENODEV;
25538 +       }
25539 +
25540 +       per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
25541 +
25542 +       dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
25543 +       return 0;
25544 +}
25545 +
25546 +static int init_cgr(struct device *qidev)
25547 +{
25548 +       int ret;
25549 +       struct qm_mcc_initcgr opts;
25550 +       const u64 cpus = *(u64 *)qman_affine_cpus();
25551 +       const int num_cpus = hweight64(cpus);
25552 +       const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
25553 +
25554 +       ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
25555 +       if (ret) {
25556 +               dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
25557 +               return ret;
25558 +       }
25559 +
25560 +       qipriv.cgr.cb = cgr_cb;
25561 +       memset(&opts, 0, sizeof(opts));
25562 +       opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
25563 +       opts.cgr.cscn_en = QM_CGR_EN;
25564 +       opts.cgr.mode = QMAN_CGR_MODE_FRAME;
25565 +       qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
25566 +
25567 +       ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
25568 +       if (ret) {
25569 +               dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
25570 +                       qipriv.cgr.cgrid);
25571 +               return ret;
25572 +       }
25573 +
25574 +       dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
25575 +       return 0;
25576 +}
25577 +
25578 +static int alloc_rsp_fqs(struct device *qidev)
25579 +{
25580 +       int ret, i;
25581 +       const cpumask_t *cpus = qman_affine_cpus();
25582 +
25583 +       /* Now create response FQs */
25584 +       for_each_cpu(i, cpus) {
25585 +               ret = alloc_rsp_fq_cpu(qidev, i);
25586 +               if (ret) {
25587 +                       dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
25588 +                       return ret;
25589 +               }
25590 +       }
25591 +
25592 +       return 0;
25593 +}
25594 +
25595 +static void free_rsp_fqs(void)
25596 +{
25597 +       int i;
25598 +       const cpumask_t *cpus = qman_affine_cpus();
25599 +
25600 +       for_each_cpu(i, cpus)
25601 +               kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
25602 +}
25603 +
25604 +int caam_qi_init(struct platform_device *caam_pdev)
25605 +{
25606 +       int err, i;
25607 +       struct platform_device *qi_pdev;
25608 +       struct device *ctrldev = &caam_pdev->dev, *qidev;
25609 +       struct caam_drv_private *ctrlpriv;
25610 +       const cpumask_t *cpus = qman_affine_cpus();
25611 +       struct cpumask old_cpumask = current->cpus_allowed;
25612 +       static struct platform_device_info qi_pdev_info = {
25613 +               .name = "caam_qi",
25614 +               .id = PLATFORM_DEVID_NONE
25615 +       };
25616 +
25617 +       /*
25618 +        * QMan requires CGRs to be removed from the same CPU+portal on which
25619 +        * they were originally allocated. Hence we need to note down the
25620 +        * initialisation CPU and use the same CPU for module exit.
25621 +        * We select the first CPU from the list of portal-owning CPUs and
25622 +        * pin module init to it.
25623 +        */
25624 +       mod_init_cpu = cpumask_first(cpus);
25625 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25626 +
25627 +       qi_pdev_info.parent = ctrldev;
25628 +       qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
25629 +       qi_pdev = platform_device_register_full(&qi_pdev_info);
25630 +       if (IS_ERR(qi_pdev))
25631 +               return PTR_ERR(qi_pdev);
25632 +       arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
25633 +
25634 +       ctrlpriv = dev_get_drvdata(ctrldev);
25635 +       qidev = &qi_pdev->dev;
25636 +
25637 +       qipriv.qi_pdev = qi_pdev;
25638 +       dev_set_drvdata(qidev, &qipriv);
25639 +
25640 +       /* Initialize the congestion detection */
25641 +       err = init_cgr(qidev);
25642 +       if (err) {
25643 +               dev_err(qidev, "CGR initialization failed: %d\n", err);
25644 +               platform_device_unregister(qi_pdev);
25645 +               return err;
25646 +       }
25647 +
25648 +       /* Initialise response FQs */
25649 +       err = alloc_rsp_fqs(qidev);
25650 +       if (err) {
25651 +               dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
25652 +               free_rsp_fqs();
25653 +               platform_device_unregister(qi_pdev);
25654 +               return err;
25655 +       }
25656 +
25657 +       /*
25658 +        * Enable the NAPI contexts on each core which has an affine
25659 +        * portal.
25660 +        */
25661 +       for_each_cpu(i, cpus) {
25662 +               struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
25663 +               struct caam_napi *caam_napi = &priv->caam_napi;
25664 +               struct napi_struct *irqtask = &caam_napi->irqtask;
25665 +               struct net_device *net_dev = &priv->net_dev;
25666 +
25667 +               net_dev->dev = *qidev;
25668 +               INIT_LIST_HEAD(&net_dev->napi_list);
25669 +
25670 +               netif_napi_add(net_dev, irqtask, caam_qi_poll,
25671 +                              CAAM_NAPI_WEIGHT);
25672 +
25673 +               napi_enable(irqtask);
25674 +       }
25675 +
25676 +       /* Hook up QI device to parent controlling caam device */
25677 +       ctrlpriv->qidev = qidev;
25678 +
25679 +       qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
25680 +                                    SLAB_CACHE_DMA, NULL);
25681 +       if (!qi_cache) {
25682 +               dev_err(qidev, "Can't allocate CAAM cache\n");
25683 +               free_rsp_fqs();
25684 +               platform_device_unregister(qi_pdev);
25685 +               return -ENOMEM;
25686 +       }
25687 +
25688 +       /* Done with the CGRs; restore the cpus allowed mask */
25689 +       set_cpus_allowed_ptr(current, &old_cpumask);
25690 +#ifdef CONFIG_DEBUG_FS
25691 +       debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
25692 +                           &times_congested, &caam_fops_u64_ro);
25693 +#endif
25694 +       dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
25695 +       return 0;
25696 +}
25697 diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
25698 new file mode 100644
25699 index 00000000..0c2e68b3
25700 --- /dev/null
25701 +++ b/drivers/crypto/caam/qi.h
25702 @@ -0,0 +1,204 @@
25703 +/*
25704 + * Public definitions for the CAAM/QI (Queue Interface) backend.
25705 + *
25706 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25707 + * Copyright 2016-2017 NXP
25708 + */
25709 +
25710 +#ifndef __QI_H__
25711 +#define __QI_H__
25712 +
25713 +#include <linux/fsl_qman.h>
25714 +#include "compat.h"
25715 +#include "desc.h"
25716 +#include "desc_constr.h"
25717 +
25718 +/*
25719 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
25720 + * (as pointed to by context_a of the to-CAAM FQ).
25721 + * When the job descriptor is executed by DECO, the whole job descriptor
25722 + * together with shared descriptor gets loaded in DECO buffer, which is
25723 + * 64 words (each 32-bit) long.
25724 + *
25725 + * The job descriptor constructed by CAAM hardware has the following layout:
25726 + *
25727 + *     HEADER          (1 word)
25728 + *     Shdesc ptr      (1 or 2 words)
25729 + *     SEQ_OUT_PTR     (1 word)
25730 + *     Out ptr         (1 or 2 words)
25731 + *     Out length      (1 word)
25732 + *     SEQ_IN_PTR      (1 word)
25733 + *     In ptr          (1 or 2 words)
25734 + *     In length       (1 word)
25735 + *
25736 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
25737 + *
25738 + * Apart from the shdesc contents, the total number of words that get loaded
25739 + * into the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
25740 + * be used for storing the shared descriptor.
25741 + */
25742 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
25743 +
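+/*
+ * Worked example (a sketch, assuming 64-bit DMA addresses): the DECO buffer
+ * holds 64 words (CAAM_DESC_BYTES_MAX = 256 bytes) and the CAAM-built job
+ * descriptor above takes 11 of them (DESC_JOB_IO_LEN = 44 bytes), leaving
+ * MAX_SDLEN = (256 - 44) / 4 = 53 words for the shared descriptor.
+ */
+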
25744 +/* Length of a single buffer in the QI driver memory cache */
25745 +#define CAAM_QI_MEMCACHE_SIZE  768
25746 +
25747 +extern bool caam_congested __read_mostly;
25748 +
25749 +/*
25750 + * This is the request structure the driver application should fill while
25751 + * submitting a job to the driver.
25752 + */
25753 +struct caam_drv_req;
25754 +
25755 +/*
25756 + * caam_qi_cbk - application's callback function invoked by the driver when the
25757 + *               request has been processed.
25758 + * @drv_req: original request that was submitted
25759 + * @status: completion status of request (0 - success, non-zero - error code)
25760 + */
25761 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
25762 +
25763 +enum optype {
25764 +       ENCRYPT,
25765 +       DECRYPT,
25766 +       GIVENCRYPT,
25767 +       NUM_OP
25768 +};
25769 +
25770 +/**
25771 + * caam_drv_ctx - CAAM/QI backend driver context
25772 + *
25773 + * The jobs are processed by the driver against a driver context.
25774 + * With every cryptographic context, a driver context is attached.
25775 + * The driver context contains data for private use by driver.
25776 + * For the applications, this is an opaque structure.
25777 + *
25778 + * @prehdr: preheader placed before shrd desc
25779 + * @sh_desc: shared descriptor
25780 + * @context_a: shared descriptor dma address
25781 + * @req_fq: to-CAAM request frame queue
25782 + * @rsp_fq: from-CAAM response frame queue
25783 + * @cpu: cpu on which to receive CAAM response
25784 + * @op_type: operation type
25785 + * @qidev: device pointer for CAAM/QI backend
25786 + */
25787 +struct caam_drv_ctx {
25788 +       u32 prehdr[2];
25789 +       u32 sh_desc[MAX_SDLEN];
25790 +       dma_addr_t context_a;
25791 +       struct qman_fq *req_fq;
25792 +       struct qman_fq *rsp_fq;
25793 +       int cpu;
25794 +       enum optype op_type;
25795 +       struct device *qidev;
25796 +} ____cacheline_aligned;
25797 +
25798 +/**
25799 + * caam_drv_req - The request structure the driver application should fill while
25800 + *                submitting a job to the driver.
25801 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
25802 + *          buffers.
25803 + * @cbk: callback function to invoke when job is completed
25804 + * @app_ctx: arbitrary context attached with request by the application
25805 + *
25806 + * The fields mentioned below should not be used by application.
25807 + * These are for private use by driver.
25808 + *
25809 + * @drv_ctx: driver context under which the request is processed
25811 + */
25812 +struct caam_drv_req {
25813 +       struct qm_sg_entry fd_sgt[2];
25814 +       struct caam_drv_ctx *drv_ctx;
25815 +       caam_qi_cbk cbk;
25816 +       void *app_ctx;
25817 +} ____cacheline_aligned;
25818 +
25819 +/**
25820 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
25821 + *
25822 + * A CAAM/QI driver context must be attached with each cryptographic context.
25823 + * This function allocates memory for CAAM/QI context and returns a handle to
25824 + * the application. This handle must be submitted along with each enqueue
25825 + * request to the driver by the application.
25826 + *
25827 + * @cpu: CPU on which the application prefers the driver to receive CAAM
25828 + *       responses. The request completion callback would be issued from this
25829 + *       CPU.
25830 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
25831 + *           context.
25832 + *
25833 + * Returns a driver context on success or negative error code on failure.
25834 + */
25835 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
25836 +                                      u32 *sh_desc);
25837 +
25838 +/**
25839 + * caam_qi_enqueue - Submit a request to QI backend driver.
25840 + *
25841 + * The request structure must be properly filled as described above.
25842 + *
25843 + * @qidev: device pointer for QI backend
25844 + * @req: CAAM QI request structure
25845 + *
25846 + * Returns 0 on success or negative error code on failure.
25847 + */
25848 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
25849 +
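+/*
+ * Typical flow, as a sketch only (error handling omitted; my_done_cbk and
+ * my_req are illustrative names, not part of this API):
+ *
+ *	static void my_done_cbk(struct caam_drv_req *req, u32 status) { ... }
+ *
+ *	int cpu = smp_processor_id();
+ *	struct caam_drv_ctx *ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
+ *
+ *	my_req->drv_ctx = ctx;
+ *	my_req->cbk = my_done_cbk;
+ *	dma_to_qm_sg_one(&my_req->fd_sgt[0], dst_dma, out_len, 0);
+ *	dma_to_qm_sg_one_last(&my_req->fd_sgt[1], src_dma, in_len, 0);
+ *	ret = caam_qi_enqueue(qidev, my_req);
+ */
+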
25850 +/**
25851 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
25852 + *                    or too many CAAM responses are pending to be processed.
25853 + * @drv_ctx: driver context for which job is to be submitted
25854 + *
25855 + * Returns caam congestion status 'true/false'
25856 + */
25857 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
25858 +
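+/*
+ * Example (a sketch): a submitter may use this to back-pressure instead of
+ * letting requests pile up while CAAM is congested:
+ *
+ *	if (caam_drv_ctx_busy(ctx))
+ *		return -EAGAIN;
+ *	ret = caam_qi_enqueue(qidev, my_req);
+ */
+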
25859 +/**
25860 + * caam_drv_ctx_update - Update QI driver context
25861 + *
25862 + * Invoked when the shared descriptor of a driver context needs to be changed.
25863 + *
25864 + * @drv_ctx: driver context to be updated
25865 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
25866 + *
25867 + * Returns 0 on success or negative error code on failure.
25868 + */
25869 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
25870 +
25871 +/**
25872 + * caam_drv_ctx_rel - Release a QI driver context
25873 + * @drv_ctx: context to be released
25874 + */
25875 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
25876 +
25877 +int caam_qi_init(struct platform_device *pdev);
25878 +int caam_qi_shutdown(struct device *dev);
25879 +
25880 +/**
25881 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
25882 + *
25883 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
25884 + * to be allocated on the hotpath. Instead of using malloc, one can use the
25885 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
25886 + * will have a size of CAAM_QI_MEMCACHE_SIZE (768 bytes here).
25887 + *
25888 + * @flags: flags that would be used for the equivalent malloc(..) call
25889 + *
25890 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
25891 + */
25892 +void *qi_cache_alloc(gfp_t flags);
25893 +
25894 +/**
25895 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
25896 + *
25897 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
25898 + * the buffer previously allocated by a qi_cache_alloc call.
25899 + * No checking is done; the call is a passthrough to
25900 + * kmem_cache_free(...)
25901 + *
25902 + * @obj: object previously allocated using qi_cache_alloc()
25903 + */
25904 +void qi_cache_free(void *obj);
25905 +
25906 +#endif /* __QI_H__ */
25907 diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
25908 index 84d2f838..74eb8c6c 100644
25909 --- a/drivers/crypto/caam/regs.h
25910 +++ b/drivers/crypto/caam/regs.h
25911 @@ -2,6 +2,7 @@
25912   * CAAM hardware register-level view
25913   *
25914   * Copyright 2008-2011 Freescale Semiconductor, Inc.
25915 + * Copyright 2017 NXP
25916   */
25917  
25918  #ifndef REGS_H
25919 @@ -67,6 +68,7 @@
25920   */
25921  
25922  extern bool caam_little_end;
25923 +extern bool caam_imx;
25924  
25925  #define caam_to_cpu(len)                               \
25926  static inline u##len caam##len ## _to_cpu(u##len val)  \
25927 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem *reg)
25928  #else /* CONFIG_64BIT */
25929  static inline void wr_reg64(void __iomem *reg, u64 data)
25930  {
25931 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25932 -       if (caam_little_end) {
25933 +       if (!caam_imx && caam_little_end) {
25934                 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
25935                 wr_reg32((u32 __iomem *)(reg), data);
25936 -       } else
25937 -#endif
25938 -       {
25939 +       } else {
25940                 wr_reg32((u32 __iomem *)(reg), data >> 32);
25941                 wr_reg32((u32 __iomem *)(reg) + 1, data);
25942         }
25943 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
25944  
25945  static inline u64 rd_reg64(void __iomem *reg)
25946  {
25947 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25948 -       if (caam_little_end)
25949 +       if (!caam_imx && caam_little_end)
25950                 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
25951                         (u64)rd_reg32((u32 __iomem *)(reg)));
25952 -       else
25953 -#endif
25954 -               return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25955 -                       (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25956 +
25957 +       return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25958 +               (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25959  }
25960  #endif /* CONFIG_64BIT  */
25961  
25962 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
25963 +{
25964 +       if (caam_imx)
25965 +               return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
25966 +                        (u64)cpu_to_caam32(upper_32_bits(value)));
25967 +
25968 +       return cpu_to_caam64(value);
25969 +}
25970 +
25971 +static inline u64 caam_dma64_to_cpu(u64 value)
25972 +{
25973 +       if (caam_imx)
25974 +               return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
25975 +                        (u64)caam32_to_cpu(upper_32_bits(value)));
25976 +
25977 +       return caam64_to_cpu(value);
25978 +}
25979 +
25980  #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
25981 -#ifdef CONFIG_SOC_IMX7D
25982 -#define cpu_to_caam_dma(value) \
25983 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25984 -                 (u64)cpu_to_caam32(upper_32_bits(value)))
25985 -#define caam_dma_to_cpu(value) \
25986 -               (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
25987 -                 (u64)caam32_to_cpu(upper_32_bits(value)))
25988 -#else
25989 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
25990 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
25991 -#endif /* CONFIG_SOC_IMX7D */
25992 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
25993 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
25994  #else
25995  #define cpu_to_caam_dma(value) cpu_to_caam32(value)
25996  #define caam_dma_to_cpu(value) caam32_to_cpu(value)
25997 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT  */
25998 -
25999 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
26000 -#define cpu_to_caam_dma64(value) \
26001 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
26002 -                (u64)cpu_to_caam32(upper_32_bits(value)))
26003 -#else
26004 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
26005 -#endif
26006 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
26007  
26008  /*
26009   * jr_outentry
26010 @@ -293,6 +291,7 @@ struct caam_perfmon {
26011         u32 cha_rev_ls;         /* CRNR - CHA Rev No. Least significant half*/
26012  #define CTPR_MS_QI_SHIFT       25
26013  #define CTPR_MS_QI_MASK                (0x1ull << CTPR_MS_QI_SHIFT)
26014 +#define CTPR_MS_DPAA2          BIT(13)
26015  #define CTPR_MS_VIRT_EN_INCL   0x00000001
26016  #define CTPR_MS_VIRT_EN_POR    0x00000002
26017  #define CTPR_MS_PG_SZ_MASK     0x10
26018 @@ -628,6 +627,8 @@ struct caam_job_ring {
26019  #define JRSTA_DECOERR_INVSIGN       0x86
26020  #define JRSTA_DECOERR_DSASIGN       0x87
26021  
26022 +#define JRSTA_QIERR_ERROR_MASK      0x00ff
26023 +
26024  #define JRSTA_CCBERR_JUMP           0x08000000
26025  #define JRSTA_CCBERR_INDEX_MASK     0xff00
26026  #define JRSTA_CCBERR_INDEX_SHIFT    8
26027 diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
26028 new file mode 100644
26029 index 00000000..3b3cabc4
26030 --- /dev/null
26031 +++ b/drivers/crypto/caam/sg_sw_qm.h
26032 @@ -0,0 +1,126 @@
26033 +/*
26034 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
26035 + * Copyright 2016-2017 NXP
26036 + *
26037 + * Redistribution and use in source and binary forms, with or without
26038 + * modification, are permitted provided that the following conditions are met:
26039 + *     * Redistributions of source code must retain the above copyright
26040 + *       notice, this list of conditions and the following disclaimer.
26041 + *     * Redistributions in binary form must reproduce the above copyright
26042 + *       notice, this list of conditions and the following disclaimer in the
26043 + *       documentation and/or other materials provided with the distribution.
26044 + *     * Neither the name of Freescale Semiconductor nor the
26045 + *       names of its contributors may be used to endorse or promote products
26046 + *       derived from this software without specific prior written permission.
26047 + *
26048 + *
26049 + * ALTERNATIVELY, this software may be distributed under the terms of the
26050 + * GNU General Public License ("GPL") as published by the Free Software
26051 + * Foundation, either version 2 of that License or (at your option) any
26052 + * later version.
26053 + *
26054 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
26055 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26056 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26057 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
26058 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26059 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26060 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26061 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26062 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26063 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26064 + */
26065 +
26066 +#ifndef __SG_SW_QM_H
26067 +#define __SG_SW_QM_H
26068 +
26069 +#include <linux/fsl_qman.h>
26070 +#include "regs.h"
26071 +
26072 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
26073 +{
26074 +       dma_addr_t addr = qm_sg_ptr->opaque;
26075 +
26076 +       qm_sg_ptr->opaque = cpu_to_caam64(addr);
26077 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
26078 +}
26079 +
26080 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
26081 +                                 u32 len, u16 offset)
26082 +{
26083 +       qm_sg_ptr->addr = dma;
26084 +       qm_sg_ptr->length = len;
26085 +       qm_sg_ptr->__reserved2 = 0;
26086 +       qm_sg_ptr->bpid = 0;
26087 +       qm_sg_ptr->__reserved3 = 0;
26088 +       qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
26089 +
26090 +       cpu_to_hw_sg(qm_sg_ptr);
26091 +}
26092 +
26093 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
26094 +                                   dma_addr_t dma, u32 len, u16 offset)
26095 +{
26096 +       qm_sg_ptr->extension = 0;
26097 +       qm_sg_ptr->final = 0;
26098 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26099 +}
26100 +
26101 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
26102 +                                        dma_addr_t dma, u32 len, u16 offset)
26103 +{
26104 +       qm_sg_ptr->extension = 0;
26105 +       qm_sg_ptr->final = 1;
26106 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26107 +}
26108 +
26109 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
26110 +                                       dma_addr_t dma, u32 len, u16 offset)
26111 +{
26112 +       qm_sg_ptr->extension = 1;
26113 +       qm_sg_ptr->final = 0;
26114 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26115 +}
26116 +
26117 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
26118 +                                            dma_addr_t dma, u32 len,
26119 +                                            u16 offset)
26120 +{
26121 +       qm_sg_ptr->extension = 1;
26122 +       qm_sg_ptr->final = 1;
26123 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26124 +}
26125 +
26126 +/*
26127 + * convert scatterlist to h/w link table format;
26128 + * does not set the final bit, but returns the last entry instead
26129 + */
26130 +static inline struct qm_sg_entry *
26131 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26132 +           struct qm_sg_entry *qm_sg_ptr, u16 offset)
26133 +{
26134 +       while (sg_count && sg) {
26135 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26136 +                                sg_dma_len(sg), offset);
26137 +               qm_sg_ptr++;
26138 +               sg = sg_next(sg);
26139 +               sg_count--;
26140 +       }
26141 +       return qm_sg_ptr - 1;
26142 +}
26143 +
26144 +/*
26145 + * convert scatterlist to h/w link table format
26146 + * scatterlist must have been previously dma mapped
26147 + */
26148 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26149 +                                   struct qm_sg_entry *qm_sg_ptr, u16 offset)
26150 +{
26151 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26152 +
26153 +       qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
26154 +       qm_sg_ptr->final = 1;
26155 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
26156 +}
26157 +
26158 +#endif /* __SG_SW_QM_H */
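
Typical use pairs these helpers with a DMA-mapped scatterlist. A minimal sketch, assuming the caller sized the qm_sg_entry table to the mapped entry count; build_qm_sg_table() is an illustrative name, not part of the patch:

        #include <linux/dma-mapping.h>
        #include "sg_sw_qm.h"

        static int build_qm_sg_table(struct device *dev, struct scatterlist *sg,
                                     int nents, struct qm_sg_entry *table)
        {
                int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

                if (!mapped)
                        return -ENOMEM;

                /* one entry per mapped segment, final bit set on the last;
                 * cpu_to_hw_sg() converts each entry to CAAM endianness */
                sg_to_qm_sg_last(sg, mapped, table, 0);
                return mapped;
        }
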
26159 diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
26160 new file mode 100644
26161 index 00000000..31b44075
26162 --- /dev/null
26163 +++ b/drivers/crypto/caam/sg_sw_qm2.h
26164 @@ -0,0 +1,81 @@
26165 +/*
26166 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
26167 + * Copyright 2017 NXP
26168 + *
26169 + * Redistribution and use in source and binary forms, with or without
26170 + * modification, are permitted provided that the following conditions are met:
26171 + *     * Redistributions of source code must retain the above copyright
26172 + *      notice, this list of conditions and the following disclaimer.
26173 + *     * Redistributions in binary form must reproduce the above copyright
26174 + *      notice, this list of conditions and the following disclaimer in the
26175 + *      documentation and/or other materials provided with the distribution.
26176 + *     * Neither the names of the above-listed copyright holders nor the
26177 + *      names of any contributors may be used to endorse or promote products
26178 + *      derived from this software without specific prior written permission.
26179 + *
26180 + *
26181 + * ALTERNATIVELY, this software may be distributed under the terms of the
26182 + * GNU General Public License ("GPL") as published by the Free Software
26183 + * Foundation, either version 2 of that License or (at your option) any
26184 + * later version.
26185 + *
26186 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26187 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26188 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26189 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26190 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26191 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26192 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26193 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26194 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26195 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26196 + * POSSIBILITY OF SUCH DAMAGE.
26197 + */
26198 +
26199 +#ifndef _SG_SW_QM2_H_
26200 +#define _SG_SW_QM2_H_
26201 +
26202 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26203 +
26204 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
26205 +                                   dma_addr_t dma, u32 len, u16 offset)
26206 +{
26207 +       dpaa2_sg_set_addr(qm_sg_ptr, dma);
26208 +       dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
26209 +       dpaa2_sg_set_final(qm_sg_ptr, false);
26210 +       dpaa2_sg_set_len(qm_sg_ptr, len);
26211 +       dpaa2_sg_set_bpid(qm_sg_ptr, 0);
26212 +       dpaa2_sg_set_offset(qm_sg_ptr, offset);
26213 +}
26214 +
26215 +/*
26216 + * convert scatterlist to h/w link table format;
26217 + * does not set the final bit, but returns the last entry instead
26218 + */
26219 +static inline struct dpaa2_sg_entry *
26220 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26221 +           struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
26222 +{
26223 +       while (sg_count && sg) {
26224 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26225 +                                sg_dma_len(sg), offset);
26226 +               qm_sg_ptr++;
26227 +               sg = sg_next(sg);
26228 +               sg_count--;
26229 +       }
26230 +       return qm_sg_ptr - 1;
26231 +}
26232 +
26233 +/*
26234 + * convert scatterlist to h/w link table format
26235 + * scatterlist must have been previously dma mapped
26236 + */
26237 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26238 +                                   struct dpaa2_sg_entry *qm_sg_ptr,
26239 +                                   u16 offset)
26240 +{
26241 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26242 +       dpaa2_sg_set_final(qm_sg_ptr, true);
26243 +}
26244 +
26245 +#endif /* _SG_SW_QM2_H_ */
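
Functionally this mirrors sg_sw_qm.h, but hardware endianness is delegated to the dpaa2_sg_set_*() accessors instead of explicit cpu_to_caam*() swaps. A minimal sketch of describing a single DMA-mapped buffer as a one-entry, final S/G table:

        static void one_buf_to_qm2_sg(struct dpaa2_sg_entry *entry,
                                      dma_addr_t dma, u32 len)
        {
                dma_to_qm_sg_one(entry, dma, len, 0);
                dpaa2_sg_set_final(entry, true);
        }
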
26246 diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
26247 index 41cd5a35..936b1b63 100644
26248 --- a/drivers/crypto/caam/sg_sw_sec4.h
26249 +++ b/drivers/crypto/caam/sg_sw_sec4.h
26250 @@ -5,9 +5,19 @@
26251   *
26252   */
26253  
26254 +#ifndef _SG_SW_SEC4_H_
26255 +#define _SG_SW_SEC4_H_
26256 +
26257 +#include "ctrl.h"
26258  #include "regs.h"
26259 +#include "sg_sw_qm2.h"
26260 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26261  
26262 -struct sec4_sg_entry;
26263 +struct sec4_sg_entry {
26264 +       u64 ptr;
26265 +       u32 len;
26266 +       u32 bpid_offset;
26267 +};
26268  
26269  /*
26270   * convert single dma address to h/w link table format
26271 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
26272  static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
26273                                       dma_addr_t dma, u32 len, u16 offset)
26274  {
26275 -       sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26276 -       sec4_sg_ptr->len = cpu_to_caam32(len);
26277 -       sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
26278 +       if (caam_dpaa2) {
26279 +               dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
26280 +                                offset);
26281 +       } else {
26282 +               sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26283 +               sec4_sg_ptr->len = cpu_to_caam32(len);
26284 +               sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
26285 +                                                        SEC4_SG_OFFSET_MASK);
26286 +       }
26287  #ifdef DEBUG
26288         print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
26289                        DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
26290 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
26291         return sec4_sg_ptr - 1;
26292  }
26293  
26294 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
26295 +{
26296 +       if (caam_dpaa2)
26297 +               dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
26298 +       else
26299 +               sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26300 +}
26301 +
26302  /*
26303   * convert scatterlist to h/w link table format
26304   * scatterlist must have been previously dma mapped
26305 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
26306                                       u16 offset)
26307  {
26308         sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
26309 -       sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26310 -}
26311 -
26312 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
26313 -       struct scatterlist *sg, unsigned int total,
26314 -       struct sec4_sg_entry *sec4_sg_ptr)
26315 -{
26316 -       do {
26317 -               unsigned int len = min(sg_dma_len(sg), total);
26318 -
26319 -               dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
26320 -               sec4_sg_ptr++;
26321 -               sg = sg_next(sg);
26322 -               total -= len;
26323 -       } while (total);
26324 -       return sec4_sg_ptr - 1;
26325 +       sg_to_sec4_set_last(sec4_sg_ptr);
26326  }
26327  
26328 -/* derive number of elements in scatterlist, but return 0 for 1 */
26329 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
26330 -{
26331 -       int sg_nents = sg_nents_for_len(sg_list, nbytes);
26332 -
26333 -       if (likely(sg_nents == 1))
26334 -               return 0;
26335 -
26336 -       return sg_nents;
26337 -}
26338 +#endif /* _SG_SW_SEC4_H_ */
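
The net effect of this change is runtime dispatch: with struct sec4_sg_entry now defined here and the caam_dpaa2 flag imported from ctrl.h, the same helpers emit either legacy sec4 or DPAA2 entries. A minimal sketch of chaining two pre-mapped buffers through the common interface; the buffer names are illustrative:

        static void link_two_buffers(struct sec4_sg_entry *table,
                                     dma_addr_t a, u32 a_len,
                                     dma_addr_t b, u32 b_len)
        {
                dma_to_sec4_sg_one(&table[0], a, a_len, 0);
                dma_to_sec4_sg_one(&table[1], b, b_len, 0);
                sg_to_sec4_set_last(&table[1]);   /* final bit in either format */
        }
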
26339 diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
26340 index ef5d394f..cc8deece 100644
26341 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
26342 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
26343 @@ -516,7 +516,7 @@ static int rsi_probe(struct usb_interface *pfunction,
26344  
26345  /**
26346   * rsi_disconnect() - This function performs the reverse of the probe function,
26347 - *                   it deintialize the driver structure.
26348 + *                   it deinitializes the driver structure.
26349   * @pfunction: Pointer to the USB interface structure.
26350   *
26351   * Return: None.
26352 diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
26353 index defffa75..ec88ed9c 100644
26354 --- a/drivers/staging/wilc1000/linux_wlan.c
26355 +++ b/drivers/staging/wilc1000/linux_wlan.c
26356 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device *dev)
26357         vif = netdev_priv(dev);
26358         wilc = vif->wilc;
26359  
26360 -       /* Deintialize IRQ */
26361 +       /* Deinitialize IRQ */
26362         if (wilc->dev_irq_num) {
26363                 free_irq(wilc->dev_irq_num, wilc);
26364                 gpio_free(wilc->gpio);
26365 diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26366 index 60d8b055..02d3e721 100644
26367 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26368 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26369 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_device *net)
26370                 del_timer_sync(&wilc_during_ip_timer);
26371  
26372         if (s32Error)
26373 -               netdev_err(net, "Error while deintializing host interface\n");
26374 +               netdev_err(net, "Error while deinitializing host interface\n");
26375  
26376         return s32Error;
26377  }
26378 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
26379 new file mode 100644
26380 index 00000000..e328b524
26381 --- /dev/null
26382 +++ b/include/crypto/acompress.h
26383 @@ -0,0 +1,269 @@
26384 +/*
26385 + * Asynchronous Compression operations
26386 + *
26387 + * Copyright (c) 2016, Intel Corporation
26388 + * Authors: Weigang Li <weigang.li@intel.com>
26389 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26390 + *
26391 + * This program is free software; you can redistribute it and/or modify it
26392 + * under the terms of the GNU General Public License as published by the Free
26393 + * Software Foundation; either version 2 of the License, or (at your option)
26394 + * any later version.
26395 + *
26396 + */
26397 +#ifndef _CRYPTO_ACOMP_H
26398 +#define _CRYPTO_ACOMP_H
26399 +#include <linux/crypto.h>
26400 +
26401 +#define CRYPTO_ACOMP_ALLOC_OUTPUT      0x00000001
26402 +
26403 +/**
26404 + * struct acomp_req - asynchronous (de)compression request
26405 + *
26406 + * @base:      Common attributes for asynchronous crypto requests
26407 +       struct scatterlist *src;        /* note: @src above is "Source data" */
26408 + * @dst:       Destination data
26409 + * @slen:      Size of the input buffer
26410 + * @dlen:      Size of the output buffer and number of bytes produced
26411 + * @flags:     Internal flags
26412 + * @__ctx:     Start of private context data
26413 + */
26414 +struct acomp_req {
26415 +       struct crypto_async_request base;
26416 +       struct scatterlist *src;
26417 +       struct scatterlist *dst;
26418 +       unsigned int slen;
26419 +       unsigned int dlen;
26420 +       u32 flags;
26421 +       void *__ctx[] CRYPTO_MINALIGN_ATTR;
26422 +};
26423 +
26424 +/**
26425 + * struct crypto_acomp - user-instantiated objects which encapsulate
26426 + * algorithms and core processing logic
26427 + *
26428 + * @compress:          Function performs a compress operation
26429 + * @decompress:                Function performs a de-compress operation
26430 + * @dst_free:          Frees destination buffer if allocated inside the
26431 + *                     algorithm
26432 + * @reqsize:           Context size for (de)compression requests
26433 + * @base:              Common crypto API algorithm data structure
26434 + */
26435 +struct crypto_acomp {
26436 +       int (*compress)(struct acomp_req *req);
26437 +       int (*decompress)(struct acomp_req *req);
26438 +       void (*dst_free)(struct scatterlist *dst);
26439 +       unsigned int reqsize;
26440 +       struct crypto_tfm base;
26441 +};
26442 +
26443 +/**
26444 + * struct acomp_alg - asynchronous compression algorithm
26445 + *
26446 + * @compress:  Function performs a compress operation
26447 + * @decompress:        Function performs a de-compress operation
26448 + * @dst_free:  Frees destination buffer if allocated inside the algorithm
26449 + * @init:      Initialize the cryptographic transformation object.
26450 + *             This function is used to initialize the cryptographic
26451 + *             transformation object. This function is called only once at
26452 + *             the instantiation time, right after the transformation context
26453 + *             was allocated. In case the cryptographic hardware has some
26454 + *             special requirements which need to be handled by software, this
26455 + *             function shall check for the precise requirement of the
26456 + *             transformation and put any software fallbacks in place.
26457 + * @exit:      Deinitialize the cryptographic transformation object. This is a
26458 + *             counterpart to @init, used to remove various changes set in
26459 + *             @init.
26460 + *
26461 + * @reqsize:   Context size for (de)compression requests
26462 + * @base:      Common crypto API algorithm data structure
26463 + */
26464 +struct acomp_alg {
26465 +       int (*compress)(struct acomp_req *req);
26466 +       int (*decompress)(struct acomp_req *req);
26467 +       void (*dst_free)(struct scatterlist *dst);
26468 +       int (*init)(struct crypto_acomp *tfm);
26469 +       void (*exit)(struct crypto_acomp *tfm);
26470 +       unsigned int reqsize;
26471 +       struct crypto_alg base;
26472 +};
26473 +
26474 +/**
26475 + * DOC: Asynchronous Compression API
26476 + *
26477 + * The Asynchronous Compression API is used with the algorithms of type
26478 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
26479 + */
26480 +
26481 +/**
26482 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
26483 + * @alg_name:  is the cra_name / name or cra_driver_name / driver name of the
26484 + *             compression algorithm e.g. "deflate"
26485 + * @type:      specifies the type of the algorithm
26486 + * @mask:      specifies the mask for the algorithm
26487 + *
26488 + * Allocate a handle for a compression algorithm. The returned struct
26489 + * crypto_acomp is the handle that is required for any subsequent
26490 + * API invocation for the compression operations.
26491 + *
26492 + * Return:     allocated handle in case of success; IS_ERR() is true in case
26493 + *             of an error, PTR_ERR() returns the error code.
26494 + */
26495 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
26496 +                                       u32 mask);
26497 +
26498 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
26499 +{
26500 +       return &tfm->base;
26501 +}
26502 +
26503 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
26504 +{
26505 +       return container_of(alg, struct acomp_alg, base);
26506 +}
26507 +
26508 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
26509 +{
26510 +       return container_of(tfm, struct crypto_acomp, base);
26511 +}
26512 +
26513 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
26514 +{
26515 +       return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
26516 +}
26517 +
26518 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
26519 +{
26520 +       return tfm->reqsize;
26521 +}
26522 +
26523 +static inline void acomp_request_set_tfm(struct acomp_req *req,
26524 +                                        struct crypto_acomp *tfm)
26525 +{
26526 +       req->base.tfm = crypto_acomp_tfm(tfm);
26527 +}
26528 +
26529 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
26530 +{
26531 +       return __crypto_acomp_tfm(req->base.tfm);
26532 +}
26533 +
26534 +/**
26535 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
26536 + *
26537 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26538 + */
26539 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
26540 +{
26541 +       crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
26542 +}
26543 +
26544 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
26545 +{
26546 +       type &= ~CRYPTO_ALG_TYPE_MASK;
26547 +       type |= CRYPTO_ALG_TYPE_ACOMPRESS;
26548 +       mask |= CRYPTO_ALG_TYPE_MASK;
26549 +
26550 +       return crypto_has_alg(alg_name, type, mask);
26551 +}
26552 +
26553 +/**
26554 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
26555 + *
26556 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26557 + *
26558 + * Return:     allocated handle in case of success or NULL in case of an error
26559 + */
26560 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
26561 +
26562 +/**
26563 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
26564 + *                        request as well as the output buffer if allocated
26565 + *                        inside the algorithm
26566 + *
26567 + * @req:       request to free
26568 + */
26569 +void acomp_request_free(struct acomp_req *req);
26570 +
26571 +/**
26572 + * acomp_request_set_callback() -- Sets an asynchronous callback
26573 + *
26574 + * Callback will be called when an asynchronous operation on a given
26575 + * request is finished.
26576 + *
26577 + * @req:       request that the callback will be set for
26578 + * @flgs:      specify, for instance, whether the operation may backlog
26579 + * @cmpl:      callback which will be called
26580 + * @data:      private data used by the caller
26581 + */
26582 +static inline void acomp_request_set_callback(struct acomp_req *req,
26583 +                                             u32 flgs,
26584 +                                             crypto_completion_t cmpl,
26585 +                                             void *data)
26586 +{
26587 +       req->base.complete = cmpl;
26588 +       req->base.data = data;
26589 +       req->base.flags = flgs;
26590 +}
26591 +
26592 +/**
26593 + * acomp_request_set_params() -- Sets request parameters
26594 + *
26595 + * Sets parameters required by an acomp operation
26596 + *
26597 + * @req:       asynchronous compress request
26598 + * @src:       pointer to input buffer scatterlist
26599 + * @dst:       pointer to output buffer scatterlist. If this is NULL, the
26600 + *             acomp layer will allocate the output memory
26601 + * @slen:      size of the input buffer
26602 + * @dlen:      size of the output buffer. If dst is NULL, this can be used by
26603 + *             the user to specify the maximum amount of memory to allocate
26604 + */
26605 +static inline void acomp_request_set_params(struct acomp_req *req,
26606 +                                           struct scatterlist *src,
26607 +                                           struct scatterlist *dst,
26608 +                                           unsigned int slen,
26609 +                                           unsigned int dlen)
26610 +{
26611 +       req->src = src;
26612 +       req->dst = dst;
26613 +       req->slen = slen;
26614 +       req->dlen = dlen;
26615 +
26616 +       if (!req->dst)
26617 +               req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
26618 +}
26619 +
26620 +/**
26621 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
26622 + *
26623 + * Function invokes the asynchronous compress operation
26624 + *
26625 + * @req:       asynchronous compress request
26626 + *
26627 + * Return:     zero on success; error code in case of error
26628 + */
26629 +static inline int crypto_acomp_compress(struct acomp_req *req)
26630 +{
26631 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26632 +
26633 +       return tfm->compress(req);
26634 +}
26635 +
26636 +/**
26637 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
26638 + *
26639 + * Function invokes the asynchronous decompress operation
26640 + *
26641 + * @req:       asynchronous compress request
26642 + *
26643 + * Return:     zero on success; error code in case of error
26644 + */
26645 +static inline int crypto_acomp_decompress(struct acomp_req *req)
26646 +{
26647 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26648 +
26649 +       return tfm->decompress(req);
26650 +}
26651 +
26652 +#endif
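
Putting the API together, a caller allocates a transform, attaches scatterlists, and waits for the asynchronous completion. A minimal sketch of one-shot synchronous use, assuming kmalloc'd linear buffers and that a "deflate" acomp implementation is available; compress_buf() and acomp_wait are illustrative names, and the hand-rolled wait plumbing is only one way to bridge the callback:

        #include <crypto/acompress.h>
        #include <linux/completion.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>

        struct acomp_wait {
                struct completion done;
                int err;
        };

        static void acomp_wait_done(struct crypto_async_request *req, int err)
        {
                struct acomp_wait *wait = req->data;

                if (err == -EINPROGRESS)
                        return;         /* backlogged request started; keep waiting */
                wait->err = err;
                complete(&wait->done);
        }

        static int compress_buf(void *src, unsigned int slen,
                                void *dst, unsigned int *dlen)
        {
                struct crypto_acomp *tfm;
                struct acomp_req *req;
                struct scatterlist sg_src, sg_dst;
                struct acomp_wait wait;
                int ret;

                tfm = crypto_alloc_acomp("deflate", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                req = acomp_request_alloc(tfm);
                if (!req) {
                        crypto_free_acomp(tfm);
                        return -ENOMEM;
                }

                sg_init_one(&sg_src, src, slen);
                sg_init_one(&sg_dst, dst, *dlen);
                acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);

                init_completion(&wait.done);
                acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           acomp_wait_done, &wait);

                ret = crypto_acomp_compress(req);
                if (ret == -EINPROGRESS || ret == -EBUSY) {
                        wait_for_completion(&wait.done);
                        ret = wait.err;
                }
                if (!ret)
                        *dlen = req->dlen;      /* bytes actually produced */

                acomp_request_free(req);
                crypto_free_acomp(tfm);
                return ret;
        }
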
26653 diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
26654 new file mode 100644
26655 index 00000000..1de2b5af
26656 --- /dev/null
26657 +++ b/include/crypto/internal/acompress.h
26658 @@ -0,0 +1,81 @@
26659 +/*
26660 + * Asynchronous Compression operations
26661 + *
26662 + * Copyright (c) 2016, Intel Corporation
26663 + * Authors: Weigang Li <weigang.li@intel.com>
26664 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26665 + *
26666 + * This program is free software; you can redistribute it and/or modify it
26667 + * under the terms of the GNU General Public License as published by the Free
26668 + * Software Foundation; either version 2 of the License, or (at your option)
26669 + * any later version.
26670 + *
26671 + */
26672 +#ifndef _CRYPTO_ACOMP_INT_H
26673 +#define _CRYPTO_ACOMP_INT_H
26674 +#include <crypto/acompress.h>
26675 +
26676 +/*
26677 + * Transform internal helpers.
26678 + */
26679 +static inline void *acomp_request_ctx(struct acomp_req *req)
26680 +{
26681 +       return req->__ctx;
26682 +}
26683 +
26684 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
26685 +{
26686 +       return tfm->base.__crt_ctx;
26687 +}
26688 +
26689 +static inline void acomp_request_complete(struct acomp_req *req,
26690 +                                         int err)
26691 +{
26692 +       req->base.complete(&req->base, err);
26693 +}
26694 +
26695 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
26696 +{
26697 +       return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
26698 +}
26699 +
26700 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
26701 +{
26702 +       struct acomp_req *req;
26703 +
26704 +       req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
26705 +       if (likely(req))
26706 +               acomp_request_set_tfm(req, tfm);
26707 +       return req;
26708 +}
26709 +
26710 +static inline void __acomp_request_free(struct acomp_req *req)
26711 +{
26712 +       kzfree(req);
26713 +}
26714 +
26715 +/**
26716 + * crypto_register_acomp() -- Register asynchronous compression algorithm
26717 + *
26718 + * Function registers an implementation of an asynchronous
26719 + * compression algorithm
26720 + *
26721 + * @alg:       algorithm definition
26722 + *
26723 + * Return:     zero on success; error code in case of error
26724 + */
26725 +int crypto_register_acomp(struct acomp_alg *alg);
26726 +
26727 +/**
26728 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
26729 + *
26730 + * Function unregisters an implementation of an asynchronous
26731 + * compression algorithm
26732 + *
26733 + * @alg:       algorithm definition
26734 + *
26735 + * Return:     zero on success; error code in case of error
26736 + */
26737 +int crypto_unregister_acomp(struct acomp_alg *alg);
26738 +
26739 +#endif
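
From a provider's perspective, an implementation fills in acomp_alg and registers it. A skeleton sketch with placeholder my_*() handlers; crypto_register_acomp() in this patch's crypto/acompress.c is expected to set the type flag and cra_type itself, so the driver only supplies the basics:

        static int my_compress(struct acomp_req *req)
        {
                /* consume req->src/req->slen, fill req->dst, set req->dlen,
                 * then report status via acomp_request_complete() if async */
                return -ENOSYS;
        }

        static int my_decompress(struct acomp_req *req)
        {
                return -ENOSYS;
        }

        static struct acomp_alg my_acomp_alg = {
                .compress   = my_compress,
                .decompress = my_decompress,
                .base       = {
                        .cra_name        = "deflate",
                        .cra_driver_name = "deflate-mydev",
                        .cra_priority    = 300,
                        .cra_module      = THIS_MODULE,
                },
        };

        static int __init my_acomp_init(void)
        {
                return crypto_register_acomp(&my_acomp_alg);
        }

        static void __exit my_acomp_exit(void)
        {
                crypto_unregister_acomp(&my_acomp_alg);
        }

        module_init(my_acomp_init);
        module_exit(my_acomp_exit);
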
26740 diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
26741 new file mode 100644
26742 index 00000000..3fda3c56
26743 --- /dev/null
26744 +++ b/include/crypto/internal/scompress.h
26745 @@ -0,0 +1,136 @@
26746 +/*
26747 + * Synchronous Compression operations
26748 + *
26749 + * Copyright 2015 LG Electronics Inc.
26750 + * Copyright (c) 2016, Intel Corporation
26751 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26752 + *
26753 + * This program is free software; you can redistribute it and/or modify it
26754 + * under the terms of the GNU General Public License as published by the Free
26755 + * Software Foundation; either version 2 of the License, or (at your option)
26756 + * any later version.
26757 + *
26758 + */
26759 +#ifndef _CRYPTO_SCOMP_INT_H
26760 +#define _CRYPTO_SCOMP_INT_H
26761 +#include <linux/crypto.h>
26762 +
26763 +#define SCOMP_SCRATCH_SIZE     131072
26764 +
26765 +struct crypto_scomp {
26766 +       struct crypto_tfm base;
26767 +};
26768 +
26769 +/**
26770 + * struct scomp_alg - synchronous compression algorithm
26771 + *
26772 + * @alloc_ctx: Function allocates algorithm specific context
26773 + * @free_ctx:  Function frees context allocated with alloc_ctx
26774 + * @compress:  Function performs a compress operation
26775 + * @decompress:        Function performs a de-compress operation
26776 + * @init:      Initialize the cryptographic transformation object.
26777 + *             This function is used to initialize the cryptographic
26778 + *             transformation object. This function is called only once at
26779 + *             the instantiation time, right after the transformation context
26780 + *             was allocated. In case the cryptographic hardware has some
26781 + *             special requirements which need to be handled by software, this
26782 + *             function shall check for the precise requirement of the
26783 + *             transformation and put any software fallbacks in place.
26784 + * @exit:      Deinitialize the cryptographic transformation object. This is a
26785 + *             counterpart to @init, used to remove various changes set in
26786 + *             @init.
26787 + * @base:      Common crypto API algorithm data structure
26788 + */
26789 +struct scomp_alg {
26790 +       void *(*alloc_ctx)(struct crypto_scomp *tfm);
26791 +       void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
26792 +       int (*compress)(struct crypto_scomp *tfm, const u8 *src,
26793 +                       unsigned int slen, u8 *dst, unsigned int *dlen,
26794 +                       void *ctx);
26795 +       int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
26796 +                         unsigned int slen, u8 *dst, unsigned int *dlen,
26797 +                         void *ctx);
26798 +       struct crypto_alg base;
26799 +};
26800 +
26801 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
26802 +{
26803 +       return container_of(alg, struct scomp_alg, base);
26804 +}
26805 +
26806 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
26807 +{
26808 +       return container_of(tfm, struct crypto_scomp, base);
26809 +}
26810 +
26811 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
26812 +{
26813 +       return &tfm->base;
26814 +}
26815 +
26816 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
26817 +{
26818 +       crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
26819 +}
26820 +
26821 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
26822 +{
26823 +       return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
26824 +}
26825 +
26826 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
26827 +{
26828 +       return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
26829 +}
26830 +
26831 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
26832 +                                        void *ctx)
26833 +{
26834 +       return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
26835 +}
26836 +
26837 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
26838 +                                       const u8 *src, unsigned int slen,
26839 +                                       u8 *dst, unsigned int *dlen, void *ctx)
26840 +{
26841 +       return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
26842 +}
26843 +
26844 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
26845 +                                         const u8 *src, unsigned int slen,
26846 +                                         u8 *dst, unsigned int *dlen,
26847 +                                         void *ctx)
26848 +{
26849 +       return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
26850 +                                                ctx);
26851 +}
26852 +
26853 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
26854 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
26855 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
26856 +
26857 +/**
26858 + * crypto_register_scomp() -- Register synchronous compression algorithm
26859 + *
26860 + * Function registers an implementation of a synchronous
26861 + * compression algorithm
26862 + *
26863 + * @alg:       algorithm definition
26864 + *
26865 + * Return: zero on success; error code in case of error
26866 + */
26867 +int crypto_register_scomp(struct scomp_alg *alg);
26868 +
26869 +/**
26870 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
26871 + *
26872 + * Function unregisters an implementation of a synchronous
26873 + * compression algorithm
26874 + *
26875 + * @alg:       algorithm definition
26876 + *
26877 + * Return: zero on success; error code in case of error
26878 + */
26879 +int crypto_unregister_scomp(struct scomp_alg *alg);
26880 +
26881 +#endif
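
A synchronous provider is simpler still: it supplies linear-buffer handlers plus per-request context hooks, and the scomp core added by this patch (crypto/scompress.c) exposes the result through the acomp interface. A skeleton sketch with illustrative my_*() names:

        static void *my_alloc_ctx(struct crypto_scomp *tfm)
        {
                return NULL;            /* stateless example: no working memory */
        }

        static void my_free_ctx(struct crypto_scomp *tfm, void *ctx)
        {
        }

        static int my_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
                                     unsigned int slen, u8 *dst,
                                     unsigned int *dlen, void *ctx)
        {
                return -ENOSYS;         /* real code compresses src into dst */
        }

        static int my_scomp_decompress(struct crypto_scomp *tfm, const u8 *src,
                                       unsigned int slen, u8 *dst,
                                       unsigned int *dlen, void *ctx)
        {
                return -ENOSYS;
        }

        static struct scomp_alg my_scomp_alg = {
                .alloc_ctx  = my_alloc_ctx,
                .free_ctx   = my_free_ctx,
                .compress   = my_scomp_compress,
                .decompress = my_scomp_decompress,
                .base       = {
                        .cra_name        = "lzo",
                        .cra_driver_name = "lzo-example",
                        .cra_module      = THIS_MODULE,
                },
        };

        /* registered from module init with crypto_register_scomp(&my_scomp_alg) */
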
26882 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
26883 index 7cee5551..8348d83d 100644
26884 --- a/include/linux/crypto.h
26885 +++ b/include/linux/crypto.h
26886 @@ -50,6 +50,8 @@
26887  #define CRYPTO_ALG_TYPE_SKCIPHER       0x00000005
26888  #define CRYPTO_ALG_TYPE_GIVCIPHER      0x00000006
26889  #define CRYPTO_ALG_TYPE_KPP            0x00000008
26890 +#define CRYPTO_ALG_TYPE_ACOMPRESS      0x0000000a
26891 +#define CRYPTO_ALG_TYPE_SCOMPRESS      0x0000000b
26892  #define CRYPTO_ALG_TYPE_RNG            0x0000000c
26893  #define CRYPTO_ALG_TYPE_AKCIPHER       0x0000000d
26894  #define CRYPTO_ALG_TYPE_DIGEST         0x0000000e
26895 @@ -60,6 +62,7 @@
26896  #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
26897  #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000e
26898  #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
26899 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
26900  
26901  #define CRYPTO_ALG_LARVAL              0x00000010
26902  #define CRYPTO_ALG_DEAD                        0x00000020
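
Note the new mask value: CRYPTO_ALG_TYPE_ACOMPRESS_MASK is 0xe rather than the usual 0xf, so both new type codes collapse to the same value under it (0xa & 0xe == 0xa and 0xb & 0xe == 0xa). A lookup for an acomp transform can therefore also resolve to a registered scomp algorithm, which the acomp layer then drives through crypto_init_scomp_ops_async() declared above.
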
26903 diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
26904 index 79b5ded2..11d21fce 100644
26905 --- a/include/uapi/linux/cryptouser.h
26906 +++ b/include/uapi/linux/cryptouser.h
26907 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
26908         CRYPTOCFGA_REPORT_CIPHER,       /* struct crypto_report_cipher */
26909         CRYPTOCFGA_REPORT_AKCIPHER,     /* struct crypto_report_akcipher */
26910         CRYPTOCFGA_REPORT_KPP,          /* struct crypto_report_kpp */
26911 +       CRYPTOCFGA_REPORT_ACOMP,        /* struct crypto_report_acomp */
26912         __CRYPTOCFGA_MAX
26913  
26914  #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
26915 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
26916         char type[CRYPTO_MAX_NAME];
26917  };
26918  
26919 +struct crypto_report_acomp {
26920 +       char type[CRYPTO_MAX_NAME];
26921 +};
26922 +
26923  #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
26924                                sizeof(struct crypto_report_blkcipher))
26925 diff --git a/scripts/spelling.txt b/scripts/spelling.txt
26926 index 163c720d..8392f89c 100644
26927 --- a/scripts/spelling.txt
26928 +++ b/scripts/spelling.txt
26929 @@ -305,6 +305,9 @@ defintion||definition
26930  defintions||definitions
26931  defualt||default
26932  defult||default
26933 +deintializing||deinitializing
26934 +deintialize||deinitialize
26935 +deintialized||deinitialized
26936  deivce||device
26937  delared||declared
26938  delare||declare
26939 diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
26940 index 504c7cd7..d8577374 100644
26941 --- a/sound/soc/amd/acp-pcm-dma.c
26942 +++ b/sound/soc/amd/acp-pcm-dma.c
26943 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mmio)
26944         return 0;
26945  }
26946  
26947 -/* Deintialize ACP */
26948 +/* Deinitialize ACP */
26949  static int acp_deinit(void __iomem *acp_mmio)
26950  {
26951         u32 val;
26952 -- 
26953 2.14.1
26954