kernel: bump 4.9 to 4.9.72
[oweals/openwrt.git] target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
1 From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 15:02:01 +0800
4 Subject: [PATCH] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for Layerscape SEC support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32  crypto/Kconfig                                    |   30 +
33  crypto/Makefile                                   |    4 +
34  crypto/acompress.c                                |  169 +
35  crypto/algboss.c                                  |   12 +-
36  crypto/crypto_user.c                              |   19 +
37  crypto/scompress.c                                |  356 ++
38  crypto/tcrypt.c                                   |   17 +-
39  crypto/testmgr.c                                  | 1701 ++++----
40  crypto/testmgr.h                                  | 1125 +++---
41  crypto/tls.c                                      |  607 +++
42  drivers/crypto/caam/Kconfig                       |   72 +-
43  drivers/crypto/caam/Makefile                      |   15 +-
44  drivers/crypto/caam/caamalg.c                     | 2125 +++-------
45  drivers/crypto/caam/caamalg_desc.c                | 1913 +++++++++
46  drivers/crypto/caam/caamalg_desc.h                |  127 +
47  drivers/crypto/caam/caamalg_qi.c                  | 2877 +++++++++++++
48  drivers/crypto/caam/caamalg_qi2.c                 | 4428 +++++++++++++++++++++
49  drivers/crypto/caam/caamalg_qi2.h                 |  265 ++
50  drivers/crypto/caam/caamhash.c                    |  521 +--
51  drivers/crypto/caam/caampkc.c                     |  471 ++-
52  drivers/crypto/caam/caampkc.h                     |   58 +
53  drivers/crypto/caam/caamrng.c                     |   16 +-
54  drivers/crypto/caam/compat.h                      |    1 +
55  drivers/crypto/caam/ctrl.c                        |  356 +-
56  drivers/crypto/caam/ctrl.h                        |    2 +
57  drivers/crypto/caam/desc.h                        |   55 +-
58  drivers/crypto/caam/desc_constr.h                 |  139 +-
59  drivers/crypto/caam/dpseci.c                      |  859 ++++
60  drivers/crypto/caam/dpseci.h                      |  395 ++
61  drivers/crypto/caam/dpseci_cmd.h                  |  261 ++
62  drivers/crypto/caam/error.c                       |  127 +-
63  drivers/crypto/caam/error.h                       |   10 +-
64  drivers/crypto/caam/intern.h                      |   31 +-
65  drivers/crypto/caam/jr.c                          |   97 +-
66  drivers/crypto/caam/jr.h                          |    2 +
67  drivers/crypto/caam/key_gen.c                     |   32 +-
68  drivers/crypto/caam/key_gen.h                     |   36 +-
69  drivers/crypto/caam/pdb.h                         |   62 +
70  drivers/crypto/caam/pkc_desc.c                    |   36 +
71  drivers/crypto/caam/qi.c                          |  797 ++++
72  drivers/crypto/caam/qi.h                          |  204 +
73  drivers/crypto/caam/regs.h                        |   63 +-
74  drivers/crypto/caam/sg_sw_qm.h                    |  126 +
75  drivers/crypto/caam/sg_sw_qm2.h                   |   81 +
76  drivers/crypto/caam/sg_sw_sec4.h                  |   60 +-
77  drivers/net/wireless/rsi/rsi_91x_usb.c            |    2 +-
78  drivers/staging/wilc1000/linux_wlan.c             |    2 +-
79  drivers/staging/wilc1000/wilc_wfi_cfgoperations.c |    2 +-
80  include/crypto/acompress.h                        |  269 ++
81  include/crypto/internal/acompress.h               |   81 +
82  include/crypto/internal/scompress.h               |  136 +
83  include/linux/crypto.h                            |    3 +
84  include/uapi/linux/cryptouser.h                   |    5 +
85  scripts/spelling.txt                              |    3 +
86  sound/soc/amd/acp-pcm-dma.c                       |    2 +-
87  55 files changed, 17310 insertions(+), 3955 deletions(-)
88  create mode 100644 crypto/acompress.c
89  create mode 100644 crypto/scompress.c
90  create mode 100644 crypto/tls.c
91  create mode 100644 drivers/crypto/caam/caamalg_desc.c
92  create mode 100644 drivers/crypto/caam/caamalg_desc.h
93  create mode 100644 drivers/crypto/caam/caamalg_qi.c
94  create mode 100644 drivers/crypto/caam/caamalg_qi2.c
95  create mode 100644 drivers/crypto/caam/caamalg_qi2.h
96  create mode 100644 drivers/crypto/caam/dpseci.c
97  create mode 100644 drivers/crypto/caam/dpseci.h
98  create mode 100644 drivers/crypto/caam/dpseci_cmd.h
99  create mode 100644 drivers/crypto/caam/qi.c
100  create mode 100644 drivers/crypto/caam/qi.h
101  create mode 100644 drivers/crypto/caam/sg_sw_qm.h
102  create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
103  create mode 100644 include/crypto/acompress.h
104  create mode 100644 include/crypto/internal/acompress.h
105  create mode 100644 include/crypto/internal/scompress.h
106
107 --- a/crypto/Kconfig
108 +++ b/crypto/Kconfig
109 @@ -102,6 +102,15 @@ config CRYPTO_KPP
110         select CRYPTO_ALGAPI
111         select CRYPTO_KPP2
112  
113 +config CRYPTO_ACOMP2
114 +       tristate
115 +       select CRYPTO_ALGAPI2
116 +
117 +config CRYPTO_ACOMP
118 +       tristate
119 +       select CRYPTO_ALGAPI
120 +       select CRYPTO_ACOMP2
121 +
122  config CRYPTO_RSA
123         tristate "RSA algorithm"
124         select CRYPTO_AKCIPHER
125 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
126         select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
127         select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
128         select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
129 +       select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
130  
131  config CRYPTO_USER
132         tristate "Userspace cryptographic algorithm configuration"
133 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
134           a sequence number xored with a salt.  This is the default
135           algorithm for CBC.
136  
137 +config CRYPTO_TLS
138 +       tristate "TLS support"
139 +       select CRYPTO_AEAD
140 +       select CRYPTO_BLKCIPHER
141 +       select CRYPTO_MANAGER
142 +       select CRYPTO_HASH
143 +       select CRYPTO_NULL
144 +       select CRYPTO_AUTHENC
145 +       help
146 +         Support for TLS 1.0 record encryption and decryption
147 +
148 +         This module adds support for encryption/decryption of TLS 1.0 frames
149 +         using blockcipher algorithms. The name of the resulting algorithm is
150 +         "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
151 +         algorithms are used (e.g. aes-generic, sha1-generic), but hardware
152 +         accelerated versions will be used automatically if available.
153 +
154 +         User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
155 +         operations through AF_ALG or cryptodev interfaces.
156 +
157  comment "Block modes"
158  
159  config CRYPTO_CBC
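
As the help text notes, the template instantiates as "tls10(hmac(<digest>),cbc(<cipher>))" and is meant to be reachable from user space via AF_ALG. A minimal user-space sketch (not part of the patch; it assumes the kernel's AF_ALG "aead" socket type accepts the tls10 instance, and it omits key setup and the record I/O on the accepted socket):

    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int tls10_open_tfm(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",
                    .salg_name   = "tls10(hmac(sha1),cbc(aes))",
            };
            int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

            if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                    return -1;
            /* setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen) would go here */
            return accept(tfmfd, NULL, 0);  /* fd used for encrypt/decrypt ops */
    }
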
160 --- a/crypto/Makefile
161 +++ b/crypto/Makefile
162 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
163  rsa_generic-y += rsa-pkcs1pad.o
164  obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
165  
166 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
167 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
168 +
169  cryptomgr-y := algboss.o testmgr.o
170  
171  obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
172 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
173  obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
174  obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
175  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
176 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
177  obj-$(CONFIG_CRYPTO_LZO) += lzo.o
178  obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
179  obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
180 --- /dev/null
181 +++ b/crypto/acompress.c
182 @@ -0,0 +1,169 @@
183 +/*
184 + * Asynchronous Compression operations
185 + *
186 + * Copyright (c) 2016, Intel Corporation
187 + * Authors: Weigang Li <weigang.li@intel.com>
188 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
189 + *
190 + * This program is free software; you can redistribute it and/or modify it
191 + * under the terms of the GNU General Public License as published by the Free
192 + * Software Foundation; either version 2 of the License, or (at your option)
193 + * any later version.
194 + *
195 + */
196 +#include <linux/errno.h>
197 +#include <linux/kernel.h>
198 +#include <linux/module.h>
199 +#include <linux/seq_file.h>
200 +#include <linux/slab.h>
201 +#include <linux/string.h>
202 +#include <linux/crypto.h>
203 +#include <crypto/algapi.h>
204 +#include <linux/cryptouser.h>
205 +#include <net/netlink.h>
206 +#include <crypto/internal/acompress.h>
207 +#include <crypto/internal/scompress.h>
208 +#include "internal.h"
209 +
210 +static const struct crypto_type crypto_acomp_type;
211 +
212 +#ifdef CONFIG_NET
213 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
214 +{
215 +       struct crypto_report_acomp racomp;
216 +
217 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
218 +
219 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
220 +                   sizeof(struct crypto_report_acomp), &racomp))
221 +               goto nla_put_failure;
222 +       return 0;
223 +
224 +nla_put_failure:
225 +       return -EMSGSIZE;
226 +}
227 +#else
228 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
229 +{
230 +       return -ENOSYS;
231 +}
232 +#endif
233 +
234 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
235 +       __attribute__ ((unused));
236 +
237 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
238 +{
239 +       seq_puts(m, "type         : acomp\n");
240 +}
241 +
242 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
243 +{
244 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
245 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
246 +
247 +       alg->exit(acomp);
248 +}
249 +
250 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
251 +{
252 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
253 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
254 +
255 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
256 +               return crypto_init_scomp_ops_async(tfm);
257 +
258 +       acomp->compress = alg->compress;
259 +       acomp->decompress = alg->decompress;
260 +       acomp->dst_free = alg->dst_free;
261 +       acomp->reqsize = alg->reqsize;
262 +
263 +       if (alg->exit)
264 +               acomp->base.exit = crypto_acomp_exit_tfm;
265 +
266 +       if (alg->init)
267 +               return alg->init(acomp);
268 +
269 +       return 0;
270 +}
271 +
272 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
273 +{
274 +       int extsize = crypto_alg_extsize(alg);
275 +
276 +       if (alg->cra_type != &crypto_acomp_type)
277 +               extsize += sizeof(struct crypto_scomp *);
278 +
279 +       return extsize;
280 +}
281 +
282 +static const struct crypto_type crypto_acomp_type = {
283 +       .extsize = crypto_acomp_extsize,
284 +       .init_tfm = crypto_acomp_init_tfm,
285 +#ifdef CONFIG_PROC_FS
286 +       .show = crypto_acomp_show,
287 +#endif
288 +       .report = crypto_acomp_report,
289 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
290 +       .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
291 +       .type = CRYPTO_ALG_TYPE_ACOMPRESS,
292 +       .tfmsize = offsetof(struct crypto_acomp, base),
293 +};
294 +
295 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
296 +                                       u32 mask)
297 +{
298 +       return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
299 +}
300 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
301 +
302 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
303 +{
304 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
305 +       struct acomp_req *req;
306 +
307 +       req = __acomp_request_alloc(acomp);
308 +       if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
309 +               return crypto_acomp_scomp_alloc_ctx(req);
310 +
311 +       return req;
312 +}
313 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
314 +
315 +void acomp_request_free(struct acomp_req *req)
316 +{
317 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
318 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
319 +
320 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
321 +               crypto_acomp_scomp_free_ctx(req);
322 +
323 +       if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
324 +               acomp->dst_free(req->dst);
325 +               req->dst = NULL;
326 +       }
327 +
328 +       __acomp_request_free(req);
329 +}
330 +EXPORT_SYMBOL_GPL(acomp_request_free);
331 +
332 +int crypto_register_acomp(struct acomp_alg *alg)
333 +{
334 +       struct crypto_alg *base = &alg->base;
335 +
336 +       base->cra_type = &crypto_acomp_type;
337 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
338 +       base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
339 +
340 +       return crypto_register_alg(base);
341 +}
342 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
343 +
344 +int crypto_unregister_acomp(struct acomp_alg *alg)
345 +{
346 +       return crypto_unregister_alg(&alg->base);
347 +}
348 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
349 +
350 +MODULE_LICENSE("GPL");
351 +MODULE_DESCRIPTION("Asynchronous compression type");
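
The file above defines the consumer-facing acomp lifecycle: crypto_alloc_acomp(), acomp_request_alloc(), and acomp_request_free(). A kernel-side sketch of a one-shot caller, modeled on the test_acomp() routine this patch adds to testmgr.c further down ("deflate" is an illustrative algorithm name, and synchronous completion is assumed; a real async caller would install a completion callback as testmgr does):

    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <crypto/acompress.h>

    static int compress_once(const u8 *in, unsigned int ilen,
                             u8 *out, unsigned int *olen)
    {
            struct crypto_acomp *tfm;
            struct acomp_req *req;
            struct scatterlist src, dst;
            int ret;

            tfm = crypto_alloc_acomp("deflate", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = acomp_request_alloc(tfm);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_tfm;
            }

            sg_init_one(&src, in, ilen);
            sg_init_one(&dst, out, *olen);
            acomp_request_set_params(req, &src, &dst, ilen, *olen);

            ret = crypto_acomp_compress(req);       /* -EINPROGRESS if async */
            if (!ret)
                    *olen = req->dlen;              /* bytes actually produced */

            acomp_request_free(req);
    out_tfm:
            crypto_free_acomp(tfm);
            return ret;
    }
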
352 --- a/crypto/algboss.c
353 +++ b/crypto/algboss.c
354 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
355         memcpy(param->alg, alg->cra_name, sizeof(param->alg));
356         type = alg->cra_flags;
357  
358 -       /* This piece of crap needs to disappear into per-type test hooks. */
359 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
360 -       type |= CRYPTO_ALG_TESTED;
361 -#else
362 -       if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
363 -             CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
364 -           ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
365 -            CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
366 -                                        alg->cra_ablkcipher.ivsize))
367 +       /* Do not test internal algorithms. */
368 +       if (type & CRYPTO_ALG_INTERNAL)
369                 type |= CRYPTO_ALG_TESTED;
370 -#endif
371  
372         param->type = type;
373  
374 --- a/crypto/crypto_user.c
375 +++ b/crypto/crypto_user.c
376 @@ -112,6 +112,21 @@ nla_put_failure:
377         return -EMSGSIZE;
378  }
379  
380 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
381 +{
382 +       struct crypto_report_acomp racomp;
383 +
384 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
385 +
386 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
387 +                   sizeof(struct crypto_report_acomp), &racomp))
388 +               goto nla_put_failure;
389 +       return 0;
390 +
391 +nla_put_failure:
392 +       return -EMSGSIZE;
393 +}
394 +
395  static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
396  {
397         struct crypto_report_akcipher rakcipher;
398 @@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp
399                         goto nla_put_failure;
400  
401                 break;
402 +       case CRYPTO_ALG_TYPE_ACOMPRESS:
403 +               if (crypto_report_acomp(skb, alg))
404 +                       goto nla_put_failure;
405  
406 +               break;
407         case CRYPTO_ALG_TYPE_AKCIPHER:
408                 if (crypto_report_akcipher(skb, alg))
409                         goto nla_put_failure;
410 --- /dev/null
411 +++ b/crypto/scompress.c
412 @@ -0,0 +1,356 @@
413 +/*
414 + * Synchronous Compression operations
415 + *
416 + * Copyright 2015 LG Electronics Inc.
417 + * Copyright (c) 2016, Intel Corporation
418 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
419 + *
420 + * This program is free software; you can redistribute it and/or modify it
421 + * under the terms of the GNU General Public License as published by the Free
422 + * Software Foundation; either version 2 of the License, or (at your option)
423 + * any later version.
424 + *
425 + */
426 +#include <linux/errno.h>
427 +#include <linux/kernel.h>
428 +#include <linux/module.h>
429 +#include <linux/seq_file.h>
430 +#include <linux/slab.h>
431 +#include <linux/string.h>
432 +#include <linux/crypto.h>
433 +#include <linux/vmalloc.h>
434 +#include <crypto/algapi.h>
435 +#include <linux/cryptouser.h>
436 +#include <net/netlink.h>
437 +#include <linux/scatterlist.h>
438 +#include <crypto/scatterwalk.h>
439 +#include <crypto/internal/acompress.h>
440 +#include <crypto/internal/scompress.h>
441 +#include "internal.h"
442 +
443 +static const struct crypto_type crypto_scomp_type;
444 +static void * __percpu *scomp_src_scratches;
445 +static void * __percpu *scomp_dst_scratches;
446 +static int scomp_scratch_users;
447 +static DEFINE_MUTEX(scomp_lock);
448 +
449 +#ifdef CONFIG_NET
450 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
451 +{
452 +       struct crypto_report_comp rscomp;
453 +
454 +       strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
455 +
456 +       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
457 +                   sizeof(struct crypto_report_comp), &rscomp))
458 +               goto nla_put_failure;
459 +       return 0;
460 +
461 +nla_put_failure:
462 +       return -EMSGSIZE;
463 +}
464 +#else
465 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
466 +{
467 +       return -ENOSYS;
468 +}
469 +#endif
470 +
471 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
472 +       __attribute__ ((unused));
473 +
474 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
475 +{
476 +       seq_puts(m, "type         : scomp\n");
477 +}
478 +
479 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
480 +{
481 +       return 0;
482 +}
483 +
484 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
485 +{
486 +       int i;
487 +
488 +       if (!scratches)
489 +               return;
490 +
491 +       for_each_possible_cpu(i)
492 +               vfree(*per_cpu_ptr(scratches, i));
493 +
494 +       free_percpu(scratches);
495 +}
496 +
497 +static void * __percpu *crypto_scomp_alloc_scratches(void)
498 +{
499 +       void * __percpu *scratches;
500 +       int i;
501 +
502 +       scratches = alloc_percpu(void *);
503 +       if (!scratches)
504 +               return NULL;
505 +
506 +       for_each_possible_cpu(i) {
507 +               void *scratch;
508 +
509 +               scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
510 +               if (!scratch)
511 +                       goto error;
512 +               *per_cpu_ptr(scratches, i) = scratch;
513 +       }
514 +
515 +       return scratches;
516 +
517 +error:
518 +       crypto_scomp_free_scratches(scratches);
519 +       return NULL;
520 +}
521 +
522 +static void crypto_scomp_free_all_scratches(void)
523 +{
524 +       if (!--scomp_scratch_users) {
525 +               crypto_scomp_free_scratches(scomp_src_scratches);
526 +               crypto_scomp_free_scratches(scomp_dst_scratches);
527 +               scomp_src_scratches = NULL;
528 +               scomp_dst_scratches = NULL;
529 +       }
530 +}
531 +
532 +static int crypto_scomp_alloc_all_scratches(void)
533 +{
534 +       if (!scomp_scratch_users++) {
535 +               scomp_src_scratches = crypto_scomp_alloc_scratches();
536 +               if (!scomp_src_scratches)
537 +                       return -ENOMEM;
538 +               scomp_dst_scratches = crypto_scomp_alloc_scratches();
539 +               if (!scomp_dst_scratches)
540 +                       return -ENOMEM;
541 +       }
542 +       return 0;
543 +}
544 +
545 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
546 +{
547 +       int i, n;
548 +       struct page *page;
549 +
550 +       if (!sgl)
551 +               return;
552 +
553 +       n = sg_nents(sgl);
554 +       for_each_sg(sgl, sgl, n, i) {
555 +               page = sg_page(sgl);
556 +               if (page)
557 +                       __free_page(page);
558 +       }
559 +
560 +       kfree(sgl);
561 +}
562 +
563 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
564 +{
565 +       struct scatterlist *sgl;
566 +       struct page *page;
567 +       int i, n;
568 +
569 +       n = ((size - 1) >> PAGE_SHIFT) + 1;
570 +
571 +       sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
572 +       if (!sgl)
573 +               return NULL;
574 +
575 +       sg_init_table(sgl, n);
576 +
577 +       for (i = 0; i < n; i++) {
578 +               page = alloc_page(gfp);
579 +               if (!page)
580 +                       goto err;
581 +               sg_set_page(sgl + i, page, PAGE_SIZE, 0);
582 +       }
583 +
584 +       return sgl;
585 +
586 +err:
587 +       sg_mark_end(sgl + i);
588 +       crypto_scomp_sg_free(sgl);
589 +       return NULL;
590 +}
591 +
592 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
593 +{
594 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
595 +       void **tfm_ctx = acomp_tfm_ctx(tfm);
596 +       struct crypto_scomp *scomp = *tfm_ctx;
597 +       void **ctx = acomp_request_ctx(req);
598 +       const int cpu = get_cpu();
599 +       u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
600 +       u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
601 +       int ret;
602 +
603 +       if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
604 +               ret = -EINVAL;
605 +               goto out;
606 +       }
607 +
608 +       if (req->dst && !req->dlen) {
609 +               ret = -EINVAL;
610 +               goto out;
611 +       }
612 +
613 +       if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
614 +               req->dlen = SCOMP_SCRATCH_SIZE;
615 +
616 +       scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
617 +       if (dir)
618 +               ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
619 +                                           scratch_dst, &req->dlen, *ctx);
620 +       else
621 +               ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
622 +                                             scratch_dst, &req->dlen, *ctx);
623 +       if (!ret) {
624 +               if (!req->dst) {
625 +                       req->dst = crypto_scomp_sg_alloc(req->dlen,
626 +                                  req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
627 +                                  GFP_KERNEL : GFP_ATOMIC);
628 +                       if (!req->dst)
629 +                               goto out;
630 +               }
631 +               scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
632 +                                        1);
633 +       }
634 +out:
635 +       put_cpu();
636 +       return ret;
637 +}
638 +
639 +static int scomp_acomp_compress(struct acomp_req *req)
640 +{
641 +       return scomp_acomp_comp_decomp(req, 1);
642 +}
643 +
644 +static int scomp_acomp_decompress(struct acomp_req *req)
645 +{
646 +       return scomp_acomp_comp_decomp(req, 0);
647 +}
648 +
649 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
650 +{
651 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
652 +
653 +       crypto_free_scomp(*ctx);
654 +}
655 +
656 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
657 +{
658 +       struct crypto_alg *calg = tfm->__crt_alg;
659 +       struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
660 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
661 +       struct crypto_scomp *scomp;
662 +
663 +       if (!crypto_mod_get(calg))
664 +               return -EAGAIN;
665 +
666 +       scomp = crypto_create_tfm(calg, &crypto_scomp_type);
667 +       if (IS_ERR(scomp)) {
668 +               crypto_mod_put(calg);
669 +               return PTR_ERR(scomp);
670 +       }
671 +
672 +       *ctx = scomp;
673 +       tfm->exit = crypto_exit_scomp_ops_async;
674 +
675 +       crt->compress = scomp_acomp_compress;
676 +       crt->decompress = scomp_acomp_decompress;
677 +       crt->dst_free = crypto_scomp_sg_free;
678 +       crt->reqsize = sizeof(void *);
679 +
680 +       return 0;
681 +}
682 +
683 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
684 +{
685 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
686 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
687 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
688 +       struct crypto_scomp *scomp = *tfm_ctx;
689 +       void *ctx;
690 +
691 +       ctx = crypto_scomp_alloc_ctx(scomp);
692 +       if (IS_ERR(ctx)) {
693 +               kfree(req);
694 +               return NULL;
695 +       }
696 +
697 +       *req->__ctx = ctx;
698 +
699 +       return req;
700 +}
701 +
702 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
703 +{
704 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
705 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
706 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
707 +       struct crypto_scomp *scomp = *tfm_ctx;
708 +       void *ctx = *req->__ctx;
709 +
710 +       if (ctx)
711 +               crypto_scomp_free_ctx(scomp, ctx);
712 +}
713 +
714 +static const struct crypto_type crypto_scomp_type = {
715 +       .extsize = crypto_alg_extsize,
716 +       .init_tfm = crypto_scomp_init_tfm,
717 +#ifdef CONFIG_PROC_FS
718 +       .show = crypto_scomp_show,
719 +#endif
720 +       .report = crypto_scomp_report,
721 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
722 +       .maskset = CRYPTO_ALG_TYPE_MASK,
723 +       .type = CRYPTO_ALG_TYPE_SCOMPRESS,
724 +       .tfmsize = offsetof(struct crypto_scomp, base),
725 +};
726 +
727 +int crypto_register_scomp(struct scomp_alg *alg)
728 +{
729 +       struct crypto_alg *base = &alg->base;
730 +       int ret = -ENOMEM;
731 +
732 +       mutex_lock(&scomp_lock);
733 +       if (crypto_scomp_alloc_all_scratches())
734 +               goto error;
735 +
736 +       base->cra_type = &crypto_scomp_type;
737 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
738 +       base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
739 +
740 +       ret = crypto_register_alg(base);
741 +       if (ret)
742 +               goto error;
743 +
744 +       mutex_unlock(&scomp_lock);
745 +       return ret;
746 +
747 +error:
748 +       crypto_scomp_free_all_scratches();
749 +       mutex_unlock(&scomp_lock);
750 +       return ret;
751 +}
752 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
753 +
754 +int crypto_unregister_scomp(struct scomp_alg *alg)
755 +{
756 +       int ret;
757 +
758 +       mutex_lock(&scomp_lock);
759 +       ret = crypto_unregister_alg(&alg->base);
760 +       crypto_scomp_free_all_scratches();
761 +       mutex_unlock(&scomp_lock);
762 +
763 +       return ret;
764 +}
765 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
766 +
767 +MODULE_LICENSE("GPL");
768 +MODULE_DESCRIPTION("Synchronous compression type");
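
scompress.c is the glue that lets a synchronous implementation show up as an acomp transform: crypto_init_scomp_ops_async() above bridges the two types through the per-CPU scratch buffers. On the provider side, a driver only fills in a struct scomp_alg and registers it. An illustrative sketch (an identity "compressor", not code from this patch; the callback signatures follow the crypto_scomp_compress()/crypto_scomp_decompress() calls used above):

    #include <linux/module.h>
    #include <linux/string.h>
    #include <crypto/internal/scompress.h>

    static void *dummy_alloc_ctx(struct crypto_scomp *tfm)
    {
            return NULL;                    /* stateless example */
    }

    static void dummy_free_ctx(struct crypto_scomp *tfm, void *ctx)
    {
    }

    static int dummy_copy(struct crypto_scomp *tfm, const u8 *src,
                          unsigned int slen, u8 *dst, unsigned int *dlen,
                          void *ctx)
    {
            if (*dlen < slen)
                    return -ENOSPC;
            memcpy(dst, src, slen);         /* identity transform */
            *dlen = slen;
            return 0;
    }

    static struct scomp_alg dummy_scomp = {
            .alloc_ctx  = dummy_alloc_ctx,
            .free_ctx   = dummy_free_ctx,
            .compress   = dummy_copy,
            .decompress = dummy_copy,
            .base       = {
                    .cra_name        = "dummy",
                    .cra_driver_name = "dummy-generic",
                    .cra_module      = THIS_MODULE,
            },
    };

    /* crypto_register_scomp(&dummy_scomp) at module init,
     * crypto_unregister_scomp(&dummy_scomp) at module exit.
     */
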
769 --- a/crypto/tcrypt.c
770 +++ b/crypto/tcrypt.c
771 @@ -74,7 +74,7 @@ static char *check[] = {
772         "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
773         "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
774         "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
775 -       NULL
776 +       "rsa", NULL
777  };
778  
779  struct tcrypt_result {
780 @@ -1331,6 +1331,10 @@ static int do_test(const char *alg, u32
781                 ret += tcrypt_test("hmac(sha3-512)");
782                 break;
783  
784 +       case 115:
785 +               ret += tcrypt_test("rsa");
786 +               break;
787 +
788         case 150:
789                 ret += tcrypt_test("ansi_cprng");
790                 break;
791 @@ -1392,6 +1396,9 @@ static int do_test(const char *alg, u32
792         case 190:
793                 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
794                 break;
795 +       case 191:
796 +               ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
797 +               break;
798         case 200:
799                 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
800                                 speed_template_16_24_32);
801 @@ -1406,9 +1413,9 @@ static int do_test(const char *alg, u32
802                 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
803                                 speed_template_32_40_48);
804                 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
805 -                               speed_template_32_48_64);
806 +                               speed_template_32_64);
807                 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
808 -                               speed_template_32_48_64);
809 +                               speed_template_32_64);
810                 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
811                                 speed_template_16_24_32);
812                 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
813 @@ -1839,9 +1846,9 @@ static int do_test(const char *alg, u32
814                 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
815                                    speed_template_32_40_48);
816                 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
817 -                                  speed_template_32_48_64);
818 +                                  speed_template_32_64);
819                 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
820 -                                  speed_template_32_48_64);
821 +                                  speed_template_32_64);
822                 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
823                                    speed_template_16_24_32);
824                 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
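
With the hooks above in place, the new vectors are reachable through tcrypt's existing mode switch; assuming tcrypt is built as a module:

    modprobe tcrypt mode=115        # "rsa" test vectors
    modprobe tcrypt mode=191        # "tls10(hmac(sha1),cbc(aes))" vectors

tcrypt deliberately never stays loaded (its init returns an error once the tests have run), so modprobe reporting a failure afterwards is the expected outcome.
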
825 --- a/crypto/testmgr.c
826 +++ b/crypto/testmgr.c
827 @@ -33,6 +33,7 @@
828  #include <crypto/drbg.h>
829  #include <crypto/akcipher.h>
830  #include <crypto/kpp.h>
831 +#include <crypto/acompress.h>
832  
833  #include "internal.h"
834  
835 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
836   */
837  #define IDX1           32
838  #define IDX2           32400
839 -#define IDX3           1
840 +#define IDX3           1511
841  #define IDX4           8193
842  #define IDX5           22222
843  #define IDX6           17101
844 @@ -82,47 +83,54 @@ struct tcrypt_result {
845  
846  struct aead_test_suite {
847         struct {
848 -               struct aead_testvec *vecs;
849 +               const struct aead_testvec *vecs;
850                 unsigned int count;
851         } enc, dec;
852  };
853  
854  struct cipher_test_suite {
855         struct {
856 -               struct cipher_testvec *vecs;
857 +               const struct cipher_testvec *vecs;
858                 unsigned int count;
859         } enc, dec;
860  };
861  
862  struct comp_test_suite {
863         struct {
864 -               struct comp_testvec *vecs;
865 +               const struct comp_testvec *vecs;
866                 unsigned int count;
867         } comp, decomp;
868  };
869  
870  struct hash_test_suite {
871 -       struct hash_testvec *vecs;
872 +       const struct hash_testvec *vecs;
873         unsigned int count;
874  };
875  
876  struct cprng_test_suite {
877 -       struct cprng_testvec *vecs;
878 +       const struct cprng_testvec *vecs;
879         unsigned int count;
880  };
881  
882  struct drbg_test_suite {
883 -       struct drbg_testvec *vecs;
884 +       const struct drbg_testvec *vecs;
885         unsigned int count;
886  };
887  
888 +struct tls_test_suite {
889 +       struct {
890 +               struct tls_testvec *vecs;
891 +               unsigned int count;
892 +       } enc, dec;
893 +};
894 +
895  struct akcipher_test_suite {
896 -       struct akcipher_testvec *vecs;
897 +       const struct akcipher_testvec *vecs;
898         unsigned int count;
899  };
900  
901  struct kpp_test_suite {
902 -       struct kpp_testvec *vecs;
903 +       const struct kpp_testvec *vecs;
904         unsigned int count;
905  };
906  
907 @@ -139,12 +147,14 @@ struct alg_test_desc {
908                 struct hash_test_suite hash;
909                 struct cprng_test_suite cprng;
910                 struct drbg_test_suite drbg;
911 +               struct tls_test_suite tls;
912                 struct akcipher_test_suite akcipher;
913                 struct kpp_test_suite kpp;
914         } suite;
915  };
916  
917 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
918 +static const unsigned int IDX[8] = {
919 +       IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
920  
921  static void hexdump(unsigned char *buf, unsigned int len)
922  {
923 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
924  }
925  
926  static int ahash_partial_update(struct ahash_request **preq,
927 -       struct crypto_ahash *tfm, struct hash_testvec *template,
928 +       struct crypto_ahash *tfm, const struct hash_testvec *template,
929         void *hash_buff, int k, int temp, struct scatterlist *sg,
930         const char *algo, char *result, struct tcrypt_result *tresult)
931  {
932 @@ -259,11 +269,12 @@ out_nostate:
933         return ret;
934  }
935  
936 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
937 -                      unsigned int tcount, bool use_digest,
938 -                      const int align_offset)
939 +static int __test_hash(struct crypto_ahash *tfm,
940 +                      const struct hash_testvec *template, unsigned int tcount,
941 +                      bool use_digest, const int align_offset)
942  {
943         const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
944 +       size_t digest_size = crypto_ahash_digestsize(tfm);
945         unsigned int i, j, k, temp;
946         struct scatterlist sg[8];
947         char *result;
948 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
949         char *xbuf[XBUFSIZE];
950         int ret = -ENOMEM;
951  
952 -       result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
953 +       result = kmalloc(digest_size, GFP_KERNEL);
954         if (!result)
955                 return ret;
956         key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
957 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
958                         goto out;
959  
960                 j++;
961 -               memset(result, 0, MAX_DIGEST_SIZE);
962 +               memset(result, 0, digest_size);
963  
964                 hash_buff = xbuf[0];
965                 hash_buff += align_offset;
966 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
967                         continue;
968  
969                 j++;
970 -               memset(result, 0, MAX_DIGEST_SIZE);
971 +               memset(result, 0, digest_size);
972  
973                 temp = 0;
974                 sg_init_table(sg, template[i].np);
975 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
976                         continue;
977  
978                 j++;
979 -               memset(result, 0, MAX_DIGEST_SIZE);
980 +               memset(result, 0, digest_size);
981  
982                 ret = -EINVAL;
983                 hash_buff = xbuf[0];
984 @@ -536,7 +547,8 @@ out_nobuf:
985         return ret;
986  }
987  
988 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
989 +static int test_hash(struct crypto_ahash *tfm,
990 +                    const struct hash_testvec *template,
991                      unsigned int tcount, bool use_digest)
992  {
993         unsigned int alignmask;
994 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
995  }
996  
997  static int __test_aead(struct crypto_aead *tfm, int enc,
998 -                      struct aead_testvec *template, unsigned int tcount,
999 +                      const struct aead_testvec *template, unsigned int tcount,
1000                        const bool diff_dst, const int align_offset)
1001  {
1002         const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1003 @@ -955,7 +967,7 @@ out_noxbuf:
1004  }
1005  
1006  static int test_aead(struct crypto_aead *tfm, int enc,
1007 -                    struct aead_testvec *template, unsigned int tcount)
1008 +                    const struct aead_testvec *template, unsigned int tcount)
1009  {
1010         unsigned int alignmask;
1011         int ret;
1012 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1013         return 0;
1014  }
1015  
1016 +static int __test_tls(struct crypto_aead *tfm, int enc,
1017 +                     struct tls_testvec *template, unsigned int tcount,
1018 +                     const bool diff_dst)
1019 +{
1020 +       const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1021 +       unsigned int i, k, authsize;
1022 +       char *q;
1023 +       struct aead_request *req;
1024 +       struct scatterlist *sg;
1025 +       struct scatterlist *sgout;
1026 +       const char *e, *d;
1027 +       struct tcrypt_result result;
1028 +       void *input;
1029 +       void *output;
1030 +       void *assoc;
1031 +       char *iv;
1032 +       char *key;
1033 +       char *xbuf[XBUFSIZE];
1034 +       char *xoutbuf[XBUFSIZE];
1035 +       char *axbuf[XBUFSIZE];
1036 +       int ret = -ENOMEM;
1037 +
1038 +       if (testmgr_alloc_buf(xbuf))
1039 +               goto out_noxbuf;
1040 +
1041 +       if (diff_dst && testmgr_alloc_buf(xoutbuf))
1042 +               goto out_nooutbuf;
1043 +
1044 +       if (testmgr_alloc_buf(axbuf))
1045 +               goto out_noaxbuf;
1046 +
1047 +       iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1048 +       if (!iv)
1049 +               goto out_noiv;
1050 +
1051 +       key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1052 +       if (!key)
1053 +               goto out_nokey;
1054 +
1055 +       sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1056 +       if (!sg)
1057 +               goto out_nosg;
1058 +
1059 +       sgout = sg + 8;
1060 +
1061 +       d = diff_dst ? "-ddst" : "";
1062 +       e = enc ? "encryption" : "decryption";
1063 +
1064 +       init_completion(&result.completion);
1065 +
1066 +       req = aead_request_alloc(tfm, GFP_KERNEL);
1067 +       if (!req) {
1068 +               pr_err("alg: tls%s: Failed to allocate request for %s\n",
1069 +                      d, algo);
1070 +               goto out;
1071 +       }
1072 +
1073 +       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1074 +                                 tcrypt_complete, &result);
1075 +
1076 +       for (i = 0; i < tcount; i++) {
1077 +               input = xbuf[0];
1078 +               assoc = axbuf[0];
1079 +
1080 +               ret = -EINVAL;
1081 +               if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1082 +                           template[i].alen > PAGE_SIZE))
1083 +                       goto out;
1084 +
1085 +               memcpy(assoc, template[i].assoc, template[i].alen);
1086 +               memcpy(input, template[i].input, template[i].ilen);
1087 +
1088 +               if (template[i].iv)
1089 +                       memcpy(iv, template[i].iv, MAX_IVLEN);
1090 +               else
1091 +                       memset(iv, 0, MAX_IVLEN);
1092 +
1093 +               crypto_aead_clear_flags(tfm, ~0);
1094 +
1095 +               if (template[i].klen > MAX_KEYLEN) {
1096 +                       pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1097 +                              d, i, algo, template[i].klen, MAX_KEYLEN);
1098 +                       ret = -EINVAL;
1099 +                       goto out;
1100 +               }
1101 +               memcpy(key, template[i].key, template[i].klen);
1102 +
1103 +               ret = crypto_aead_setkey(tfm, key, template[i].klen);
1104 +               if (!ret == template[i].fail) {
1105 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1106 +                              d, i, algo, crypto_aead_get_flags(tfm));
1107 +                       goto out;
1108 +               } else if (ret)
1109 +                       continue;
1110 +
1111 +               authsize = 20;
1112 +               ret = crypto_aead_setauthsize(tfm, authsize);
1113 +               if (ret) {
1114 +                       pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1115 +                              d, authsize, i, algo);
1116 +                       goto out;
1117 +               }
1118 +
1119 +               k = !!template[i].alen;
1120 +               sg_init_table(sg, k + 1);
1121 +               sg_set_buf(&sg[0], assoc, template[i].alen);
1122 +               sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1123 +                                          template[i].ilen));
1124 +               output = input;
1125 +
1126 +               if (diff_dst) {
1127 +                       sg_init_table(sgout, k + 1);
1128 +                       sg_set_buf(&sgout[0], assoc, template[i].alen);
1129 +
1130 +                       output = xoutbuf[0];
1131 +                       sg_set_buf(&sgout[k], output,
1132 +                                  (enc ? template[i].rlen : template[i].ilen));
1133 +               }
1134 +
1135 +               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1136 +                                      template[i].ilen, iv);
1137 +
1138 +               aead_request_set_ad(req, template[i].alen);
1139 +
1140 +               ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1141 +
1142 +               switch (ret) {
1143 +               case 0:
1144 +                       if (template[i].novrfy) {
1145 +                               /* verification was supposed to fail */
1146 +                               pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1147 +                                      d, e, i, algo);
1148 +                               /* so really, we got a bad message */
1149 +                               ret = -EBADMSG;
1150 +                               goto out;
1151 +                       }
1152 +                       break;
1153 +               case -EINPROGRESS:
1154 +               case -EBUSY:
1155 +                       wait_for_completion(&result.completion);
1156 +                       reinit_completion(&result.completion);
1157 +                       ret = result.err;
1158 +                       if (!ret)
1159 +                               break;
1160 +               case -EBADMSG:
1161 +                       /* verification failure was expected */
1162 +                       if (template[i].novrfy)
1163 +                               continue;
1164 +                       /* fall through */
1165 +               default:
1166 +                       pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1167 +                              d, e, i, algo, -ret);
1168 +                       goto out;
1169 +               }
1170 +
1171 +               q = output;
1172 +               if (memcmp(q, template[i].result, template[i].rlen)) {
1173 +                       pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1174 +                              d, i, e, algo);
1175 +                       hexdump(q, template[i].rlen);
1176 +                       pr_err("should be:\n");
1177 +                       hexdump(template[i].result, template[i].rlen);
1178 +                       ret = -EINVAL;
1179 +                       goto out;
1180 +               }
1181 +       }
1182 +
1183 +out:
1184 +       aead_request_free(req);
1185 +
1186 +       kfree(sg);
1187 +out_nosg:
1188 +       kfree(key);
1189 +out_nokey:
1190 +       kfree(iv);
1191 +out_noiv:
1192 +       testmgr_free_buf(axbuf);
1193 +out_noaxbuf:
1194 +       if (diff_dst)
1195 +               testmgr_free_buf(xoutbuf);
1196 +out_nooutbuf:
1197 +       testmgr_free_buf(xbuf);
1198 +out_noxbuf:
1199 +       return ret;
1200 +}
1201 +
1202 +static int test_tls(struct crypto_aead *tfm, int enc,
1203 +                   struct tls_testvec *template, unsigned int tcount)
1204 +{
1205 +       int ret;
1206 +       /* test 'dst == src' case */
1207 +       ret = __test_tls(tfm, enc, template, tcount, false);
1208 +       if (ret)
1209 +               return ret;
1210 +       /* test 'dst != src' case */
1211 +       return __test_tls(tfm, enc, template, tcount, true);
1212 +}
1213 +
1214 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1215 +                       u32 type, u32 mask)
1216 +{
1217 +       struct crypto_aead *tfm;
1218 +       int err = 0;
1219 +
1220 +       tfm = crypto_alloc_aead(driver, type, mask);
1221 +       if (IS_ERR(tfm)) {
1222 +               pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1223 +                      driver, PTR_ERR(tfm));
1224 +               return PTR_ERR(tfm);
1225 +       }
1226 +
1227 +       if (desc->suite.tls.enc.vecs) {
1228 +               err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1229 +                              desc->suite.tls.enc.count);
1230 +               if (err)
1231 +                       goto out;
1232 +       }
1233 +
1234 +       if (!err && desc->suite.tls.dec.vecs)
1235 +               err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1236 +                              desc->suite.tls.dec.count);
1237 +
1238 +out:
1239 +       crypto_free_aead(tfm);
1240 +       return err;
1241 +}
1242 +
1243  static int test_cipher(struct crypto_cipher *tfm, int enc,
1244 -                      struct cipher_testvec *template, unsigned int tcount)
1245 +                      const struct cipher_testvec *template,
1246 +                      unsigned int tcount)
1247  {
1248         const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1249         unsigned int i, j, k;
1250 @@ -1066,7 +1306,8 @@ out_nobuf:
1251  }
1252  
1253  static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1254 -                          struct cipher_testvec *template, unsigned int tcount,
1255 +                          const struct cipher_testvec *template,
1256 +                          unsigned int tcount,
1257                            const bool diff_dst, const int align_offset)
1258  {
1259         const char *algo =
1260 @@ -1330,7 +1571,8 @@ out_nobuf:
1261  }
1262  
1263  static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1264 -                        struct cipher_testvec *template, unsigned int tcount)
1265 +                        const struct cipher_testvec *template,
1266 +                        unsigned int tcount)
1267  {
1268         unsigned int alignmask;
1269         int ret;
1270 @@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
1271         return 0;
1272  }
1273  
1274 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1275 -                    struct comp_testvec *dtemplate, int ctcount, int dtcount)
1276 +static int test_comp(struct crypto_comp *tfm,
1277 +                    const struct comp_testvec *ctemplate,
1278 +                    const struct comp_testvec *dtemplate,
1279 +                    int ctcount, int dtcount)
1280  {
1281         const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1282         unsigned int i;
1283 @@ -1442,7 +1686,154 @@ out:
1284         return ret;
1285  }
1286  
1287 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1288 +static int test_acomp(struct crypto_acomp *tfm,
1289 +                     const struct comp_testvec *ctemplate,
1290 +                     const struct comp_testvec *dtemplate,
1291 +                     int ctcount, int dtcount)
1292 +{
1293 +       const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1294 +       unsigned int i;
1295 +       char *output;
1296 +       int ret;
1297 +       struct scatterlist src, dst;
1298 +       struct acomp_req *req;
1299 +       struct tcrypt_result result;
1300 +
1301 +       output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1302 +       if (!output)
1303 +               return -ENOMEM;
1304 +
1305 +       for (i = 0; i < ctcount; i++) {
1306 +               unsigned int dlen = COMP_BUF_SIZE;
1307 +               int ilen = ctemplate[i].inlen;
1308 +               void *input_vec;
1309 +
1310 +               input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1311 +               if (!input_vec) {
1312 +                       ret = -ENOMEM;
1313 +                       goto out;
1314 +               }
1315 +
1316 +               memset(output, 0, dlen);
1317 +               init_completion(&result.completion);
1318 +               sg_init_one(&src, input_vec, ilen);
1319 +               sg_init_one(&dst, output, dlen);
1320 +
1321 +               req = acomp_request_alloc(tfm);
1322 +               if (!req) {
1323 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1324 +                              algo);
1325 +                       kfree(input_vec);
1326 +                       ret = -ENOMEM;
1327 +                       goto out;
1328 +               }
1329 +
1330 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1331 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1332 +                                          tcrypt_complete, &result);
1333 +
1334 +               ret = wait_async_op(&result, crypto_acomp_compress(req));
1335 +               if (ret) {
1336 +                       pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1337 +                              i + 1, algo, -ret);
1338 +                       kfree(input_vec);
1339 +                       acomp_request_free(req);
1340 +                       goto out;
1341 +               }
1342 +
1343 +               if (req->dlen != ctemplate[i].outlen) {
1344 +                       pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1345 +                              i + 1, algo, req->dlen);
1346 +                       ret = -EINVAL;
1347 +                       kfree(input_vec);
1348 +                       acomp_request_free(req);
1349 +                       goto out;
1350 +               }
1351 +
1352 +               if (memcmp(output, ctemplate[i].output, req->dlen)) {
1353 +                       pr_err("alg: acomp: Compression test %d failed for %s\n",
1354 +                              i + 1, algo);
1355 +                       hexdump(output, req->dlen);
1356 +                       ret = -EINVAL;
1357 +                       kfree(input_vec);
1358 +                       acomp_request_free(req);
1359 +                       goto out;
1360 +               }
1361 +
1362 +               kfree(input_vec);
1363 +               acomp_request_free(req);
1364 +       }
1365 +
1366 +       for (i = 0; i < dtcount; i++) {
1367 +               unsigned int dlen = COMP_BUF_SIZE;
1368 +               int ilen = dtemplate[i].inlen;
1369 +               void *input_vec;
1370 +
1371 +               input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1372 +               if (!input_vec) {
1373 +                       ret = -ENOMEM;
1374 +                       goto out;
1375 +               }
1376 +
1377 +               memset(output, 0, dlen);
1378 +               init_completion(&result.completion);
1379 +               sg_init_one(&src, input_vec, ilen);
1380 +               sg_init_one(&dst, output, dlen);
1381 +
1382 +               req = acomp_request_alloc(tfm);
1383 +               if (!req) {
1384 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1385 +                              algo);
1386 +                       kfree(input_vec);
1387 +                       ret = -ENOMEM;
1388 +                       goto out;
1389 +               }
1390 +
1391 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1392 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1393 +                                          tcrypt_complete, &result);
1394 +
1395 +               ret = wait_async_op(&result, crypto_acomp_decompress(req));
1396 +               if (ret) {
1397 +                       pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1398 +                              i + 1, algo, -ret);
1399 +                       kfree(input_vec);
1400 +                       acomp_request_free(req);
1401 +                       goto out;
1402 +               }
1403 +
1404 +               if (req->dlen != dtemplate[i].outlen) {
1405 +                       pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1406 +                              i + 1, algo, req->dlen);
1407 +                       ret = -EINVAL;
1408 +                       kfree(input_vec);
1409 +                       acomp_request_free(req);
1410 +                       goto out;
1411 +               }
1412 +
1413 +               if (memcmp(output, dtemplate[i].output, req->dlen)) {
1414 +                       pr_err("alg: acomp: Decompression test %d failed for %s\n",
1415 +                              i + 1, algo);
1416 +                       hexdump(output, req->dlen);
1417 +                       ret = -EINVAL;
1418 +                       kfree(input_vec);
1419 +                       acomp_request_free(req);
1420 +                       goto out;
1421 +               }
1422 +
1423 +               kfree(input_vec);
1424 +               acomp_request_free(req);
1425 +       }
1426 +
1427 +       ret = 0;
1428 +
1429 +out:
1430 +       kfree(output);
1431 +       return ret;
1432 +}
1433 +
1434 +static int test_cprng(struct crypto_rng *tfm,
1435 +                     const struct cprng_testvec *template,
1436                       unsigned int tcount)
1437  {
1438         const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1439 @@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
1440         struct crypto_aead *tfm;
1441         int err = 0;
1442  
1443 -       tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1444 +       tfm = crypto_alloc_aead(driver, type, mask);
1445         if (IS_ERR(tfm)) {
1446                 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1447                        "%ld\n", driver, PTR_ERR(tfm));
1448 @@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
1449         struct crypto_cipher *tfm;
1450         int err = 0;
1451  
1452 -       tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1453 +       tfm = crypto_alloc_cipher(driver, type, mask);
1454         if (IS_ERR(tfm)) {
1455                 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1456                        "%s: %ld\n", driver, PTR_ERR(tfm));
1457 @@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
1458         struct crypto_skcipher *tfm;
1459         int err = 0;
1460  
1461 -       tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1462 +       tfm = crypto_alloc_skcipher(driver, type, mask);
1463         if (IS_ERR(tfm)) {
1464                 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1465                        "%s: %ld\n", driver, PTR_ERR(tfm));
1466 @@ -1593,22 +1984,38 @@ out:
1467  static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1468                          u32 type, u32 mask)
1469  {
1470 -       struct crypto_comp *tfm;
1471 +       struct crypto_comp *comp;
1472 +       struct crypto_acomp *acomp;
1473         int err;
1474 +       u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1475  
1476 -       tfm = crypto_alloc_comp(driver, type, mask);
1477 -       if (IS_ERR(tfm)) {
1478 -               printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1479 -                      "%ld\n", driver, PTR_ERR(tfm));
1480 -               return PTR_ERR(tfm);
1481 -       }
1482 +       if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1483 +               acomp = crypto_alloc_acomp(driver, type, mask);
1484 +               if (IS_ERR(acomp)) {
1485 +                       pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1486 +                              driver, PTR_ERR(acomp));
1487 +                       return PTR_ERR(acomp);
1488 +               }
1489 +               err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1490 +                                desc->suite.comp.decomp.vecs,
1491 +                                desc->suite.comp.comp.count,
1492 +                                desc->suite.comp.decomp.count);
1493 +               crypto_free_acomp(acomp);
1494 +       } else {
1495 +               comp = crypto_alloc_comp(driver, type, mask);
1496 +               if (IS_ERR(comp)) {
1497 +                       pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1498 +                              driver, PTR_ERR(comp));
1499 +                       return PTR_ERR(comp);
1500 +               }
1501  
1502 -       err = test_comp(tfm, desc->suite.comp.comp.vecs,
1503 -                       desc->suite.comp.decomp.vecs,
1504 -                       desc->suite.comp.comp.count,
1505 -                       desc->suite.comp.decomp.count);
1506 +               err = test_comp(comp, desc->suite.comp.comp.vecs,
1507 +                               desc->suite.comp.decomp.vecs,
1508 +                               desc->suite.comp.comp.count,
1509 +                               desc->suite.comp.decomp.count);
1510  
1511 -       crypto_free_comp(tfm);
1512 +               crypto_free_comp(comp);
1513 +       }
1514         return err;
1515  }
1516  
1517 @@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
1518         struct crypto_ahash *tfm;
1519         int err;
1520  
1521 -       tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1522 +       tfm = crypto_alloc_ahash(driver, type, mask);
1523         if (IS_ERR(tfm)) {
1524                 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1525                        "%ld\n", driver, PTR_ERR(tfm));
1526 @@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
1527         if (err)
1528                 goto out;
1529  
1530 -       tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1531 +       tfm = crypto_alloc_shash(driver, type, mask);
1532         if (IS_ERR(tfm)) {
1533                 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1534                        "%ld\n", driver, PTR_ERR(tfm));
1535 @@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
1536         struct crypto_rng *rng;
1537         int err;
1538  
1539 -       rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1540 +       rng = crypto_alloc_rng(driver, type, mask);
1541         if (IS_ERR(rng)) {
1542                 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1543                        "%ld\n", driver, PTR_ERR(rng));
1544 @@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
1545  }
1546  
1547  
1548 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1549 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1550                           const char *driver, u32 type, u32 mask)
1551  {
1552         int ret = -EAGAIN;
1553 @@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
1554         if (!buf)
1555                 return -ENOMEM;
1556  
1557 -       drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1558 +       drng = crypto_alloc_rng(driver, type, mask);
1559         if (IS_ERR(drng)) {
1560                 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1561                        "%s\n", driver);
1562 @@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
1563         int err = 0;
1564         int pr = 0;
1565         int i = 0;
1566 -       struct drbg_testvec *template = desc->suite.drbg.vecs;
1567 +       const struct drbg_testvec *template = desc->suite.drbg.vecs;
1568         unsigned int tcount = desc->suite.drbg.count;
1569  
1570         if (0 == memcmp(driver, "drbg_pr_", 8))
1571 @@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
1572  
1573  }
1574  
1575 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1576 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1577                        const char *alg)
1578  {
1579         struct kpp_request *req;
1580 @@ -1888,7 +2295,7 @@ free_req:
1581  }
1582  
1583  static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1584 -                   struct kpp_testvec *vecs, unsigned int tcount)
1585 +                   const struct kpp_testvec *vecs, unsigned int tcount)
1586  {
1587         int ret, i;
1588  
1589 @@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
1590         struct crypto_kpp *tfm;
1591         int err = 0;
1592  
1593 -       tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1594 +       tfm = crypto_alloc_kpp(driver, type, mask);
1595         if (IS_ERR(tfm)) {
1596                 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1597                        driver, PTR_ERR(tfm));
1598 @@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
1599  }
1600  
1601  static int test_akcipher_one(struct crypto_akcipher *tfm,
1602 -                            struct akcipher_testvec *vecs)
1603 +                            const struct akcipher_testvec *vecs)
1604  {
1605         char *xbuf[XBUFSIZE];
1606         struct akcipher_request *req;
1607 @@ -2044,7 +2451,8 @@ free_xbuf:
1608  }
1609  
1610  static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1611 -                        struct akcipher_testvec *vecs, unsigned int tcount)
1612 +                        const struct akcipher_testvec *vecs,
1613 +                        unsigned int tcount)
1614  {
1615         const char *algo =
1616                 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1617 @@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
1618         struct crypto_akcipher *tfm;
1619         int err = 0;
1620  
1621 -       tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1622 +       tfm = crypto_alloc_akcipher(driver, type, mask);
1623         if (IS_ERR(tfm)) {
1624                 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1625                        driver, PTR_ERR(tfm));
1626 @@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
1627         return 0;
1628  }
1629  
1630 +#define __VECS(tv)     { .vecs = tv, .count = ARRAY_SIZE(tv) }
1631 +
1632  /* Please keep this list sorted by algorithm name. */
1633  static const struct alg_test_desc alg_test_descs[] = {
1634         {
1635 -               .alg = "__cbc-cast5-avx",
1636 -               .test = alg_test_null,
1637 -       }, {
1638 -               .alg = "__cbc-cast6-avx",
1639 -               .test = alg_test_null,
1640 -       }, {
1641 -               .alg = "__cbc-serpent-avx",
1642 -               .test = alg_test_null,
1643 -       }, {
1644 -               .alg = "__cbc-serpent-avx2",
1645 -               .test = alg_test_null,
1646 -       }, {
1647 -               .alg = "__cbc-serpent-sse2",
1648 -               .test = alg_test_null,
1649 -       }, {
1650 -               .alg = "__cbc-twofish-avx",
1651 -               .test = alg_test_null,
1652 -       }, {
1653 -               .alg = "__driver-cbc-aes-aesni",
1654 -               .test = alg_test_null,
1655 -               .fips_allowed = 1,
1656 -       }, {
1657 -               .alg = "__driver-cbc-camellia-aesni",
1658 -               .test = alg_test_null,
1659 -       }, {
1660 -               .alg = "__driver-cbc-camellia-aesni-avx2",
1661 -               .test = alg_test_null,
1662 -       }, {
1663 -               .alg = "__driver-cbc-cast5-avx",
1664 -               .test = alg_test_null,
1665 -       }, {
1666 -               .alg = "__driver-cbc-cast6-avx",
1667 -               .test = alg_test_null,
1668 -       }, {
1669 -               .alg = "__driver-cbc-serpent-avx",
1670 -               .test = alg_test_null,
1671 -       }, {
1672 -               .alg = "__driver-cbc-serpent-avx2",
1673 -               .test = alg_test_null,
1674 -       }, {
1675 -               .alg = "__driver-cbc-serpent-sse2",
1676 -               .test = alg_test_null,
1677 -       }, {
1678 -               .alg = "__driver-cbc-twofish-avx",
1679 -               .test = alg_test_null,
1680 -       }, {
1681 -               .alg = "__driver-ecb-aes-aesni",
1682 -               .test = alg_test_null,
1683 -               .fips_allowed = 1,
1684 -       }, {
1685 -               .alg = "__driver-ecb-camellia-aesni",
1686 -               .test = alg_test_null,
1687 -       }, {
1688 -               .alg = "__driver-ecb-camellia-aesni-avx2",
1689 -               .test = alg_test_null,
1690 -       }, {
1691 -               .alg = "__driver-ecb-cast5-avx",
1692 -               .test = alg_test_null,
1693 -       }, {
1694 -               .alg = "__driver-ecb-cast6-avx",
1695 -               .test = alg_test_null,
1696 -       }, {
1697 -               .alg = "__driver-ecb-serpent-avx",
1698 -               .test = alg_test_null,
1699 -       }, {
1700 -               .alg = "__driver-ecb-serpent-avx2",
1701 -               .test = alg_test_null,
1702 -       }, {
1703 -               .alg = "__driver-ecb-serpent-sse2",
1704 -               .test = alg_test_null,
1705 -       }, {
1706 -               .alg = "__driver-ecb-twofish-avx",
1707 -               .test = alg_test_null,
1708 -       }, {
1709 -               .alg = "__driver-gcm-aes-aesni",
1710 -               .test = alg_test_null,
1711 -               .fips_allowed = 1,
1712 -       }, {
1713 -               .alg = "__ghash-pclmulqdqni",
1714 -               .test = alg_test_null,
1715 -               .fips_allowed = 1,
1716 -       }, {
1717                 .alg = "ansi_cprng",
1718                 .test = alg_test_cprng,
1719                 .suite = {
1720 -                       .cprng = {
1721 -                               .vecs = ansi_cprng_aes_tv_template,
1722 -                               .count = ANSI_CPRNG_AES_TEST_VECTORS
1723 -                       }
1724 +                       .cprng = __VECS(ansi_cprng_aes_tv_template)
1725                 }
1726         }, {
1727                 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1728                 .test = alg_test_aead,
1729                 .suite = {
1730                         .aead = {
1731 -                               .enc = {
1732 -                                       .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1733 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1734 -                               },
1735 -                               .dec = {
1736 -                                       .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1737 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1738 -                               }
1739 +                               .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1740 +                               .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1741                         }
1742                 }
1743         }, {
1744 @@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
1745                 .test = alg_test_aead,
1746                 .suite = {
1747                         .aead = {
1748 -                               .enc = {
1749 -                                       .vecs =
1750 -                                       hmac_sha1_aes_cbc_enc_tv_temp,
1751 -                                       .count =
1752 -                                       HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1753 -                               }
1754 +                               .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1755                         }
1756                 }
1757         }, {
1758 @@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
1759                 .test = alg_test_aead,
1760                 .suite = {
1761                         .aead = {
1762 -                               .enc = {
1763 -                                       .vecs =
1764 -                                       hmac_sha1_des_cbc_enc_tv_temp,
1765 -                                       .count =
1766 -                                       HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1767 -                               }
1768 +                               .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1769                         }
1770                 }
1771         }, {
1772 @@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
1773                 .fips_allowed = 1,
1774                 .suite = {
1775                         .aead = {
1776 -                               .enc = {
1777 -                                       .vecs =
1778 -                                       hmac_sha1_des3_ede_cbc_enc_tv_temp,
1779 -                                       .count =
1780 -                                       HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1781 -                               }
1782 +                               .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1783                         }
1784                 }
1785         }, {
1786 @@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
1787                 .test = alg_test_aead,
1788                 .suite = {
1789                         .aead = {
1790 -                               .enc = {
1791 -                                       .vecs =
1792 -                                       hmac_sha1_ecb_cipher_null_enc_tv_temp,
1793 -                                       .count =
1794 -                                       HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1795 -                               },
1796 -                               .dec = {
1797 -                                       .vecs =
1798 -                                       hmac_sha1_ecb_cipher_null_dec_tv_temp,
1799 -                                       .count =
1800 -                                       HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1801 -                               }
1802 +                               .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1803 +                               .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1804                         }
1805                 }
1806         }, {
1807 @@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
1808                 .test = alg_test_aead,
1809                 .suite = {
1810                         .aead = {
1811 -                               .enc = {
1812 -                                       .vecs =
1813 -                                       hmac_sha224_des_cbc_enc_tv_temp,
1814 -                                       .count =
1815 -                                       HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1816 -                               }
1817 +                               .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1818                         }
1819                 }
1820         }, {
1821 @@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
1822                 .fips_allowed = 1,
1823                 .suite = {
1824                         .aead = {
1825 -                               .enc = {
1826 -                                       .vecs =
1827 -                                       hmac_sha224_des3_ede_cbc_enc_tv_temp,
1828 -                                       .count =
1829 -                                       HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1830 -                               }
1831 +                               .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1832                         }
1833                 }
1834         }, {
1835 @@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
1836                 .fips_allowed = 1,
1837                 .suite = {
1838                         .aead = {
1839 -                               .enc = {
1840 -                                       .vecs =
1841 -                                       hmac_sha256_aes_cbc_enc_tv_temp,
1842 -                                       .count =
1843 -                                       HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1844 -                               }
1845 +                               .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1846                         }
1847                 }
1848         }, {
1849 @@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
1850                 .test = alg_test_aead,
1851                 .suite = {
1852                         .aead = {
1853 -                               .enc = {
1854 -                                       .vecs =
1855 -                                       hmac_sha256_des_cbc_enc_tv_temp,
1856 -                                       .count =
1857 -                                       HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1858 -                               }
1859 +                               .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1860                         }
1861                 }
1862         }, {
1863 @@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
1864                 .fips_allowed = 1,
1865                 .suite = {
1866                         .aead = {
1867 -                               .enc = {
1868 -                                       .vecs =
1869 -                                       hmac_sha256_des3_ede_cbc_enc_tv_temp,
1870 -                                       .count =
1871 -                                       HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1872 -                               }
1873 +                               .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1874                         }
1875                 }
1876         }, {
1877 @@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
1878                 .test = alg_test_aead,
1879                 .suite = {
1880                         .aead = {
1881 -                               .enc = {
1882 -                                       .vecs =
1883 -                                       hmac_sha384_des_cbc_enc_tv_temp,
1884 -                                       .count =
1885 -                                       HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1886 -                               }
1887 +                               .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1888                         }
1889                 }
1890         }, {
1891 @@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
1892                 .fips_allowed = 1,
1893                 .suite = {
1894                         .aead = {
1895 -                               .enc = {
1896 -                                       .vecs =
1897 -                                       hmac_sha384_des3_ede_cbc_enc_tv_temp,
1898 -                                       .count =
1899 -                                       HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1900 -                               }
1901 +                               .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1902                         }
1903                 }
1904         }, {
1905 @@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
1906                 .test = alg_test_aead,
1907                 .suite = {
1908                         .aead = {
1909 -                               .enc = {
1910 -                                       .vecs =
1911 -                                       hmac_sha512_aes_cbc_enc_tv_temp,
1912 -                                       .count =
1913 -                                       HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1914 -                               }
1915 +                               .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1916                         }
1917                 }
1918         }, {
1919 @@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
1920                 .test = alg_test_aead,
1921                 .suite = {
1922                         .aead = {
1923 -                               .enc = {
1924 -                                       .vecs =
1925 -                                       hmac_sha512_des_cbc_enc_tv_temp,
1926 -                                       .count =
1927 -                                       HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1928 -                               }
1929 +                               .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1930                         }
1931                 }
1932         }, {
1933 @@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
1934                 .fips_allowed = 1,
1935                 .suite = {
1936                         .aead = {
1937 -                               .enc = {
1938 -                                       .vecs =
1939 -                                       hmac_sha512_des3_ede_cbc_enc_tv_temp,
1940 -                                       .count =
1941 -                                       HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1942 -                               }
1943 +                               .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1944                         }
1945                 }
1946         }, {
1947 @@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
1948                 .fips_allowed = 1,
1949                 .suite = {
1950                         .cipher = {
1951 -                               .enc = {
1952 -                                       .vecs = aes_cbc_enc_tv_template,
1953 -                                       .count = AES_CBC_ENC_TEST_VECTORS
1954 -                               },
1955 -                               .dec = {
1956 -                                       .vecs = aes_cbc_dec_tv_template,
1957 -                                       .count = AES_CBC_DEC_TEST_VECTORS
1958 -                               }
1959 +                               .enc = __VECS(aes_cbc_enc_tv_template),
1960 +                               .dec = __VECS(aes_cbc_dec_tv_template)
1961                         }
1962                 }
1963         }, {
1964 @@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
1965                 .test = alg_test_skcipher,
1966                 .suite = {
1967                         .cipher = {
1968 -                               .enc = {
1969 -                                       .vecs = anubis_cbc_enc_tv_template,
1970 -                                       .count = ANUBIS_CBC_ENC_TEST_VECTORS
1971 -                               },
1972 -                               .dec = {
1973 -                                       .vecs = anubis_cbc_dec_tv_template,
1974 -                                       .count = ANUBIS_CBC_DEC_TEST_VECTORS
1975 -                               }
1976 +                               .enc = __VECS(anubis_cbc_enc_tv_template),
1977 +                               .dec = __VECS(anubis_cbc_dec_tv_template)
1978                         }
1979                 }
1980         }, {
1981 @@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
1982                 .test = alg_test_skcipher,
1983                 .suite = {
1984                         .cipher = {
1985 -                               .enc = {
1986 -                                       .vecs = bf_cbc_enc_tv_template,
1987 -                                       .count = BF_CBC_ENC_TEST_VECTORS
1988 -                               },
1989 -                               .dec = {
1990 -                                       .vecs = bf_cbc_dec_tv_template,
1991 -                                       .count = BF_CBC_DEC_TEST_VECTORS
1992 -                               }
1993 +                               .enc = __VECS(bf_cbc_enc_tv_template),
1994 +                               .dec = __VECS(bf_cbc_dec_tv_template)
1995                         }
1996                 }
1997         }, {
1998 @@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
1999                 .test = alg_test_skcipher,
2000                 .suite = {
2001                         .cipher = {
2002 -                               .enc = {
2003 -                                       .vecs = camellia_cbc_enc_tv_template,
2004 -                                       .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2005 -                               },
2006 -                               .dec = {
2007 -                                       .vecs = camellia_cbc_dec_tv_template,
2008 -                                       .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2009 -                               }
2010 +                               .enc = __VECS(camellia_cbc_enc_tv_template),
2011 +                               .dec = __VECS(camellia_cbc_dec_tv_template)
2012                         }
2013                 }
2014         }, {
2015 @@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
2016                 .test = alg_test_skcipher,
2017                 .suite = {
2018                         .cipher = {
2019 -                               .enc = {
2020 -                                       .vecs = cast5_cbc_enc_tv_template,
2021 -                                       .count = CAST5_CBC_ENC_TEST_VECTORS
2022 -                               },
2023 -                               .dec = {
2024 -                                       .vecs = cast5_cbc_dec_tv_template,
2025 -                                       .count = CAST5_CBC_DEC_TEST_VECTORS
2026 -                               }
2027 +                               .enc = __VECS(cast5_cbc_enc_tv_template),
2028 +                               .dec = __VECS(cast5_cbc_dec_tv_template)
2029                         }
2030                 }
2031         }, {
2032 @@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
2033                 .test = alg_test_skcipher,
2034                 .suite = {
2035                         .cipher = {
2036 -                               .enc = {
2037 -                                       .vecs = cast6_cbc_enc_tv_template,
2038 -                                       .count = CAST6_CBC_ENC_TEST_VECTORS
2039 -                               },
2040 -                               .dec = {
2041 -                                       .vecs = cast6_cbc_dec_tv_template,
2042 -                                       .count = CAST6_CBC_DEC_TEST_VECTORS
2043 -                               }
2044 +                               .enc = __VECS(cast6_cbc_enc_tv_template),
2045 +                               .dec = __VECS(cast6_cbc_dec_tv_template)
2046                         }
2047                 }
2048         }, {
2049 @@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
2050                 .test = alg_test_skcipher,
2051                 .suite = {
2052                         .cipher = {
2053 -                               .enc = {
2054 -                                       .vecs = des_cbc_enc_tv_template,
2055 -                                       .count = DES_CBC_ENC_TEST_VECTORS
2056 -                               },
2057 -                               .dec = {
2058 -                                       .vecs = des_cbc_dec_tv_template,
2059 -                                       .count = DES_CBC_DEC_TEST_VECTORS
2060 -                               }
2061 +                               .enc = __VECS(des_cbc_enc_tv_template),
2062 +                               .dec = __VECS(des_cbc_dec_tv_template)
2063                         }
2064                 }
2065         }, {
2066 @@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
2067                 .fips_allowed = 1,
2068                 .suite = {
2069                         .cipher = {
2070 -                               .enc = {
2071 -                                       .vecs = des3_ede_cbc_enc_tv_template,
2072 -                                       .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2073 -                               },
2074 -                               .dec = {
2075 -                                       .vecs = des3_ede_cbc_dec_tv_template,
2076 -                                       .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2077 -                               }
2078 +                               .enc = __VECS(des3_ede_cbc_enc_tv_template),
2079 +                               .dec = __VECS(des3_ede_cbc_dec_tv_template)
2080                         }
2081                 }
2082         }, {
2083 @@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
2084                 .test = alg_test_skcipher,
2085                 .suite = {
2086                         .cipher = {
2087 -                               .enc = {
2088 -                                       .vecs = serpent_cbc_enc_tv_template,
2089 -                                       .count = SERPENT_CBC_ENC_TEST_VECTORS
2090 -                               },
2091 -                               .dec = {
2092 -                                       .vecs = serpent_cbc_dec_tv_template,
2093 -                                       .count = SERPENT_CBC_DEC_TEST_VECTORS
2094 -                               }
2095 +                               .enc = __VECS(serpent_cbc_enc_tv_template),
2096 +                               .dec = __VECS(serpent_cbc_dec_tv_template)
2097                         }
2098                 }
2099         }, {
2100 @@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
2101                 .test = alg_test_skcipher,
2102                 .suite = {
2103                         .cipher = {
2104 -                               .enc = {
2105 -                                       .vecs = tf_cbc_enc_tv_template,
2106 -                                       .count = TF_CBC_ENC_TEST_VECTORS
2107 -                               },
2108 -                               .dec = {
2109 -                                       .vecs = tf_cbc_dec_tv_template,
2110 -                                       .count = TF_CBC_DEC_TEST_VECTORS
2111 -                               }
2112 +                               .enc = __VECS(tf_cbc_enc_tv_template),
2113 +                               .dec = __VECS(tf_cbc_dec_tv_template)
2114                         }
2115                 }
2116         }, {
2117 +               .alg = "cbcmac(aes)",
2118 +               .fips_allowed = 1,
2119 +               .test = alg_test_hash,
2120 +               .suite = {
2121 +                       .hash = __VECS(aes_cbcmac_tv_template)
2122 +               }
2123 +       }, {
2124                 .alg = "ccm(aes)",
2125                 .test = alg_test_aead,
2126                 .fips_allowed = 1,
2127                 .suite = {
2128                         .aead = {
2129 -                               .enc = {
2130 -                                       .vecs = aes_ccm_enc_tv_template,
2131 -                                       .count = AES_CCM_ENC_TEST_VECTORS
2132 -                               },
2133 -                               .dec = {
2134 -                                       .vecs = aes_ccm_dec_tv_template,
2135 -                                       .count = AES_CCM_DEC_TEST_VECTORS
2136 -                               }
2137 +                               .enc = __VECS(aes_ccm_enc_tv_template),
2138 +                               .dec = __VECS(aes_ccm_dec_tv_template)
2139                         }
2140                 }
2141         }, {
2142 @@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
2143                 .test = alg_test_skcipher,
2144                 .suite = {
2145                         .cipher = {
2146 -                               .enc = {
2147 -                                       .vecs = chacha20_enc_tv_template,
2148 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2149 -                               },
2150 -                               .dec = {
2151 -                                       .vecs = chacha20_enc_tv_template,
2152 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2153 -                               },
2154 +                               .enc = __VECS(chacha20_enc_tv_template),
2155 +                               .dec = __VECS(chacha20_enc_tv_template),
2156                         }
2157                 }
2158         }, {
2159 @@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
2160                 .fips_allowed = 1,
2161                 .test = alg_test_hash,
2162                 .suite = {
2163 -                       .hash = {
2164 -                               .vecs = aes_cmac128_tv_template,
2165 -                               .count = CMAC_AES_TEST_VECTORS
2166 -                       }
2167 +                       .hash = __VECS(aes_cmac128_tv_template)
2168                 }
2169         }, {
2170                 .alg = "cmac(des3_ede)",
2171                 .fips_allowed = 1,
2172                 .test = alg_test_hash,
2173                 .suite = {
2174 -                       .hash = {
2175 -                               .vecs = des3_ede_cmac64_tv_template,
2176 -                               .count = CMAC_DES3_EDE_TEST_VECTORS
2177 -                       }
2178 +                       .hash = __VECS(des3_ede_cmac64_tv_template)
2179                 }
2180         }, {
2181                 .alg = "compress_null",
2182 @@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
2183                 .alg = "crc32",
2184                 .test = alg_test_hash,
2185                 .suite = {
2186 -                       .hash = {
2187 -                               .vecs = crc32_tv_template,
2188 -                               .count = CRC32_TEST_VECTORS
2189 -                       }
2190 +                       .hash = __VECS(crc32_tv_template)
2191                 }
2192         }, {
2193                 .alg = "crc32c",
2194                 .test = alg_test_crc32c,
2195                 .fips_allowed = 1,
2196                 .suite = {
2197 -                       .hash = {
2198 -                               .vecs = crc32c_tv_template,
2199 -                               .count = CRC32C_TEST_VECTORS
2200 -                       }
2201 +                       .hash = __VECS(crc32c_tv_template)
2202                 }
2203         }, {
2204                 .alg = "crct10dif",
2205                 .test = alg_test_hash,
2206                 .fips_allowed = 1,
2207                 .suite = {
2208 -                       .hash = {
2209 -                               .vecs = crct10dif_tv_template,
2210 -                               .count = CRCT10DIF_TEST_VECTORS
2211 -                       }
2212 +                       .hash = __VECS(crct10dif_tv_template)
2213                 }
2214         }, {
2215 -               .alg = "cryptd(__driver-cbc-aes-aesni)",
2216 -               .test = alg_test_null,
2217 -               .fips_allowed = 1,
2218 -       }, {
2219 -               .alg = "cryptd(__driver-cbc-camellia-aesni)",
2220 -               .test = alg_test_null,
2221 -       }, {
2222 -               .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2223 -               .test = alg_test_null,
2224 -       }, {
2225 -               .alg = "cryptd(__driver-cbc-serpent-avx2)",
2226 -               .test = alg_test_null,
2227 -       }, {
2228 -               .alg = "cryptd(__driver-ecb-aes-aesni)",
2229 -               .test = alg_test_null,
2230 -               .fips_allowed = 1,
2231 -       }, {
2232 -               .alg = "cryptd(__driver-ecb-camellia-aesni)",
2233 -               .test = alg_test_null,
2234 -       }, {
2235 -               .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2236 -               .test = alg_test_null,
2237 -       }, {
2238 -               .alg = "cryptd(__driver-ecb-cast5-avx)",
2239 -               .test = alg_test_null,
2240 -       }, {
2241 -               .alg = "cryptd(__driver-ecb-cast6-avx)",
2242 -               .test = alg_test_null,
2243 -       }, {
2244 -               .alg = "cryptd(__driver-ecb-serpent-avx)",
2245 -               .test = alg_test_null,
2246 -       }, {
2247 -               .alg = "cryptd(__driver-ecb-serpent-avx2)",
2248 -               .test = alg_test_null,
2249 -       }, {
2250 -               .alg = "cryptd(__driver-ecb-serpent-sse2)",
2251 -               .test = alg_test_null,
2252 -       }, {
2253 -               .alg = "cryptd(__driver-ecb-twofish-avx)",
2254 -               .test = alg_test_null,
2255 -       }, {
2256 -               .alg = "cryptd(__driver-gcm-aes-aesni)",
2257 -               .test = alg_test_null,
2258 -               .fips_allowed = 1,
2259 -       }, {
2260 -               .alg = "cryptd(__ghash-pclmulqdqni)",
2261 -               .test = alg_test_null,
2262 -               .fips_allowed = 1,
2263 -       }, {
2264                 .alg = "ctr(aes)",
2265                 .test = alg_test_skcipher,
2266                 .fips_allowed = 1,
2267                 .suite = {
2268                         .cipher = {
2269 -                               .enc = {
2270 -                                       .vecs = aes_ctr_enc_tv_template,
2271 -                                       .count = AES_CTR_ENC_TEST_VECTORS
2272 -                               },
2273 -                               .dec = {
2274 -                                       .vecs = aes_ctr_dec_tv_template,
2275 -                                       .count = AES_CTR_DEC_TEST_VECTORS
2276 -                               }
2277 +                               .enc = __VECS(aes_ctr_enc_tv_template),
2278 +                               .dec = __VECS(aes_ctr_dec_tv_template)
2279                         }
2280                 }
2281         }, {
2282 @@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
2283                 .test = alg_test_skcipher,
2284                 .suite = {
2285                         .cipher = {
2286 -                               .enc = {
2287 -                                       .vecs = bf_ctr_enc_tv_template,
2288 -                                       .count = BF_CTR_ENC_TEST_VECTORS
2289 -                               },
2290 -                               .dec = {
2291 -                                       .vecs = bf_ctr_dec_tv_template,
2292 -                                       .count = BF_CTR_DEC_TEST_VECTORS
2293 -                               }
2294 +                               .enc = __VECS(bf_ctr_enc_tv_template),
2295 +                               .dec = __VECS(bf_ctr_dec_tv_template)
2296                         }
2297                 }
2298         }, {
2299 @@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
2300                 .test = alg_test_skcipher,
2301                 .suite = {
2302                         .cipher = {
2303 -                               .enc = {
2304 -                                       .vecs = camellia_ctr_enc_tv_template,
2305 -                                       .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2306 -                               },
2307 -                               .dec = {
2308 -                                       .vecs = camellia_ctr_dec_tv_template,
2309 -                                       .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2310 -                               }
2311 +                               .enc = __VECS(camellia_ctr_enc_tv_template),
2312 +                               .dec = __VECS(camellia_ctr_dec_tv_template)
2313                         }
2314                 }
2315         }, {
2316 @@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
2317                 .test = alg_test_skcipher,
2318                 .suite = {
2319                         .cipher = {
2320 -                               .enc = {
2321 -                                       .vecs = cast5_ctr_enc_tv_template,
2322 -                                       .count = CAST5_CTR_ENC_TEST_VECTORS
2323 -                               },
2324 -                               .dec = {
2325 -                                       .vecs = cast5_ctr_dec_tv_template,
2326 -                                       .count = CAST5_CTR_DEC_TEST_VECTORS
2327 -                               }
2328 +                               .enc = __VECS(cast5_ctr_enc_tv_template),
2329 +                               .dec = __VECS(cast5_ctr_dec_tv_template)
2330                         }
2331                 }
2332         }, {
2333 @@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
2334                 .test = alg_test_skcipher,
2335                 .suite = {
2336                         .cipher = {
2337 -                               .enc = {
2338 -                                       .vecs = cast6_ctr_enc_tv_template,
2339 -                                       .count = CAST6_CTR_ENC_TEST_VECTORS
2340 -                               },
2341 -                               .dec = {
2342 -                                       .vecs = cast6_ctr_dec_tv_template,
2343 -                                       .count = CAST6_CTR_DEC_TEST_VECTORS
2344 -                               }
2345 +                               .enc = __VECS(cast6_ctr_enc_tv_template),
2346 +                               .dec = __VECS(cast6_ctr_dec_tv_template)
2347                         }
2348                 }
2349         }, {
2350 @@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
2351                 .test = alg_test_skcipher,
2352                 .suite = {
2353                         .cipher = {
2354 -                               .enc = {
2355 -                                       .vecs = des_ctr_enc_tv_template,
2356 -                                       .count = DES_CTR_ENC_TEST_VECTORS
2357 -                               },
2358 -                               .dec = {
2359 -                                       .vecs = des_ctr_dec_tv_template,
2360 -                                       .count = DES_CTR_DEC_TEST_VECTORS
2361 -                               }
2362 +                               .enc = __VECS(des_ctr_enc_tv_template),
2363 +                               .dec = __VECS(des_ctr_dec_tv_template)
2364                         }
2365                 }
2366         }, {
2367                 .alg = "ctr(des3_ede)",
2368                 .test = alg_test_skcipher,
2369 +               .fips_allowed = 1,
2370                 .suite = {
2371                         .cipher = {
2372 -                               .enc = {
2373 -                                       .vecs = des3_ede_ctr_enc_tv_template,
2374 -                                       .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2375 -                               },
2376 -                               .dec = {
2377 -                                       .vecs = des3_ede_ctr_dec_tv_template,
2378 -                                       .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2379 -                               }
2380 +                               .enc = __VECS(des3_ede_ctr_enc_tv_template),
2381 +                               .dec = __VECS(des3_ede_ctr_dec_tv_template)
2382                         }
2383                 }
2384         }, {
2385 @@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
2386                 .test = alg_test_skcipher,
2387                 .suite = {
2388                         .cipher = {
2389 -                               .enc = {
2390 -                                       .vecs = serpent_ctr_enc_tv_template,
2391 -                                       .count = SERPENT_CTR_ENC_TEST_VECTORS
2392 -                               },
2393 -                               .dec = {
2394 -                                       .vecs = serpent_ctr_dec_tv_template,
2395 -                                       .count = SERPENT_CTR_DEC_TEST_VECTORS
2396 -                               }
2397 +                               .enc = __VECS(serpent_ctr_enc_tv_template),
2398 +                               .dec = __VECS(serpent_ctr_dec_tv_template)
2399                         }
2400                 }
2401         }, {
2402 @@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
2403                 .test = alg_test_skcipher,
2404                 .suite = {
2405                         .cipher = {
2406 -                               .enc = {
2407 -                                       .vecs = tf_ctr_enc_tv_template,
2408 -                                       .count = TF_CTR_ENC_TEST_VECTORS
2409 -                               },
2410 -                               .dec = {
2411 -                                       .vecs = tf_ctr_dec_tv_template,
2412 -                                       .count = TF_CTR_DEC_TEST_VECTORS
2413 -                               }
2414 +                               .enc = __VECS(tf_ctr_enc_tv_template),
2415 +                               .dec = __VECS(tf_ctr_dec_tv_template)
2416                         }
2417                 }
2418         }, {
2419 @@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
2420                 .test = alg_test_skcipher,
2421                 .suite = {
2422                         .cipher = {
2423 -                               .enc = {
2424 -                                       .vecs = cts_mode_enc_tv_template,
2425 -                                       .count = CTS_MODE_ENC_TEST_VECTORS
2426 -                               },
2427 -                               .dec = {
2428 -                                       .vecs = cts_mode_dec_tv_template,
2429 -                                       .count = CTS_MODE_DEC_TEST_VECTORS
2430 -                               }
2431 +                               .enc = __VECS(cts_mode_enc_tv_template),
2432 +                               .dec = __VECS(cts_mode_dec_tv_template)
2433                         }
2434                 }
2435         }, {
2436 @@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
2437                 .fips_allowed = 1,
2438                 .suite = {
2439                         .comp = {
2440 -                               .comp = {
2441 -                                       .vecs = deflate_comp_tv_template,
2442 -                                       .count = DEFLATE_COMP_TEST_VECTORS
2443 -                               },
2444 -                               .decomp = {
2445 -                                       .vecs = deflate_decomp_tv_template,
2446 -                                       .count = DEFLATE_DECOMP_TEST_VECTORS
2447 -                               }
2448 +                               .comp = __VECS(deflate_comp_tv_template),
2449 +                               .decomp = __VECS(deflate_decomp_tv_template)
2450                         }
2451                 }
2452         }, {
2453 @@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
2454                 .test = alg_test_kpp,
2455                 .fips_allowed = 1,
2456                 .suite = {
2457 -                       .kpp = {
2458 -                               .vecs = dh_tv_template,
2459 -                               .count = DH_TEST_VECTORS
2460 -                       }
2461 +                       .kpp = __VECS(dh_tv_template)
2462                 }
2463         }, {
2464                 .alg = "digest_null",
2465 @@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
2466                 .test = alg_test_drbg,
2467                 .fips_allowed = 1,
2468                 .suite = {
2469 -                       .drbg = {
2470 -                               .vecs = drbg_nopr_ctr_aes128_tv_template,
2471 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2472 -                       }
2473 +                       .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2474                 }
2475         }, {
2476                 .alg = "drbg_nopr_ctr_aes192",
2477                 .test = alg_test_drbg,
2478                 .fips_allowed = 1,
2479                 .suite = {
2480 -                       .drbg = {
2481 -                               .vecs = drbg_nopr_ctr_aes192_tv_template,
2482 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2483 -                       }
2484 +                       .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2485                 }
2486         }, {
2487                 .alg = "drbg_nopr_ctr_aes256",
2488                 .test = alg_test_drbg,
2489                 .fips_allowed = 1,
2490                 .suite = {
2491 -                       .drbg = {
2492 -                               .vecs = drbg_nopr_ctr_aes256_tv_template,
2493 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2494 -                       }
2495 +                       .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2496                 }
2497         }, {
2498                 /*
2499 @@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
2500                 .test = alg_test_drbg,
2501                 .fips_allowed = 1,
2502                 .suite = {
2503 -                       .drbg = {
2504 -                               .vecs = drbg_nopr_hmac_sha256_tv_template,
2505 -                               .count =
2506 -                               ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2507 -                       }
2508 +                       .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2509                 }
2510         }, {
2511                 /* covered by drbg_nopr_hmac_sha256 test */
2512 @@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
2513                 .test = alg_test_drbg,
2514                 .fips_allowed = 1,
2515                 .suite = {
2516 -                       .drbg = {
2517 -                               .vecs = drbg_nopr_sha256_tv_template,
2518 -                               .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2519 -                       }
2520 +                       .drbg = __VECS(drbg_nopr_sha256_tv_template)
2521                 }
2522         }, {
2523                 /* covered by drbg_nopr_sha256 test */
2524 @@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
2525                 .test = alg_test_drbg,
2526                 .fips_allowed = 1,
2527                 .suite = {
2528 -                       .drbg = {
2529 -                               .vecs = drbg_pr_ctr_aes128_tv_template,
2530 -                               .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2531 -                       }
2532 +                       .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2533                 }
2534         }, {
2535                 /* covered by drbg_pr_ctr_aes128 test */
2536 @@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
2537                 .test = alg_test_drbg,
2538                 .fips_allowed = 1,
2539                 .suite = {
2540 -                       .drbg = {
2541 -                               .vecs = drbg_pr_hmac_sha256_tv_template,
2542 -                               .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2543 -                       }
2544 +                       .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2545                 }
2546         }, {
2547                 /* covered by drbg_pr_hmac_sha256 test */
2548 @@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
2549                 .test = alg_test_drbg,
2550                 .fips_allowed = 1,
2551                 .suite = {
2552 -                       .drbg = {
2553 -                               .vecs = drbg_pr_sha256_tv_template,
2554 -                               .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2555 -                       }
2556 +                       .drbg = __VECS(drbg_pr_sha256_tv_template)
2557                 }
2558         }, {
                /* covered by drbg_pr_sha256 test */
@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .test = alg_test_null,
        }, {
-               .alg = "ecb(__aes-aesni)",
-               .test = alg_test_null,
-               .fips_allowed = 1,
-       }, {
                .alg = "ecb(aes)",
                .test = alg_test_skcipher,
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_enc_tv_template,
-                                       .count = AES_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_dec_tv_template,
-                                       .count = AES_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_enc_tv_template),
+                               .dec = __VECS(aes_dec_tv_template)
                        }
                }
        }, {
@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = anubis_enc_tv_template,
-                                       .count = ANUBIS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = anubis_dec_tv_template,
-                                       .count = ANUBIS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(anubis_enc_tv_template),
+                               .dec = __VECS(anubis_dec_tv_template)
                        }
                }
        }, {
@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = arc4_enc_tv_template,
-                                       .count = ARC4_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = arc4_dec_tv_template,
-                                       .count = ARC4_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(arc4_enc_tv_template),
+                               .dec = __VECS(arc4_dec_tv_template)
                        }
                }
        }, {
@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = bf_enc_tv_template,
-                                       .count = BF_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = bf_dec_tv_template,
-                                       .count = BF_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(bf_enc_tv_template),
+                               .dec = __VECS(bf_dec_tv_template)
                        }
                }
        }, {
@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = camellia_enc_tv_template,
-                                       .count = CAMELLIA_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = camellia_dec_tv_template,
-                                       .count = CAMELLIA_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(camellia_enc_tv_template),
+                               .dec = __VECS(camellia_dec_tv_template)
                        }
                }
        }, {
@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = cast5_enc_tv_template,
-                                       .count = CAST5_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = cast5_dec_tv_template,
-                                       .count = CAST5_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(cast5_enc_tv_template),
+                               .dec = __VECS(cast5_dec_tv_template)
                        }
                }
        }, {
@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = cast6_enc_tv_template,
-                                       .count = CAST6_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = cast6_dec_tv_template,
-                                       .count = CAST6_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(cast6_enc_tv_template),
+                               .dec = __VECS(cast6_dec_tv_template)
                        }
                }
        }, {
@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = des_enc_tv_template,
-                                       .count = DES_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = des_dec_tv_template,
-                                       .count = DES_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(des_enc_tv_template),
+                               .dec = __VECS(des_dec_tv_template)
                        }
                }
        }, {
@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = des3_ede_enc_tv_template,
-                                       .count = DES3_EDE_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = des3_ede_dec_tv_template,
-                                       .count = DES3_EDE_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(des3_ede_enc_tv_template),
+                               .dec = __VECS(des3_ede_dec_tv_template)
                        }
                }
        }, {
@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = khazad_enc_tv_template,
-                                       .count = KHAZAD_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = khazad_dec_tv_template,
-                                       .count = KHAZAD_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(khazad_enc_tv_template),
+                               .dec = __VECS(khazad_dec_tv_template)
                        }
                }
        }, {
@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = seed_enc_tv_template,
-                                       .count = SEED_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = seed_dec_tv_template,
-                                       .count = SEED_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(seed_enc_tv_template),
+                               .dec = __VECS(seed_dec_tv_template)
                        }
                }
        }, {
@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = serpent_enc_tv_template,
-                                       .count = SERPENT_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = serpent_dec_tv_template,
-                                       .count = SERPENT_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(serpent_enc_tv_template),
+                               .dec = __VECS(serpent_dec_tv_template)
                        }
                }
        }, {
@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = tea_enc_tv_template,
-                                       .count = TEA_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = tea_dec_tv_template,
-                                       .count = TEA_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(tea_enc_tv_template),
+                               .dec = __VECS(tea_dec_tv_template)
                        }
                }
        }, {
@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = tnepres_enc_tv_template,
-                                       .count = TNEPRES_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = tnepres_dec_tv_template,
-                                       .count = TNEPRES_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(tnepres_enc_tv_template),
+                               .dec = __VECS(tnepres_dec_tv_template)
                        }
                }
        }, {
@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = tf_enc_tv_template,
-                                       .count = TF_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = tf_dec_tv_template,
-                                       .count = TF_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(tf_enc_tv_template),
+                               .dec = __VECS(tf_dec_tv_template)
                        }
                }
        }, {
@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = xeta_enc_tv_template,
-                                       .count = XETA_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = xeta_dec_tv_template,
-                                       .count = XETA_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(xeta_enc_tv_template),
+                               .dec = __VECS(xeta_dec_tv_template)
                        }
                }
        }, {
@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = xtea_enc_tv_template,
-                                       .count = XTEA_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = xtea_dec_tv_template,
-                                       .count = XTEA_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(xtea_enc_tv_template),
+                               .dec = __VECS(xtea_dec_tv_template)
                        }
                }
        }, {
@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_kpp,
                .fips_allowed = 1,
                .suite = {
-                       .kpp = {
-                               .vecs = ecdh_tv_template,
-                               .count = ECDH_TEST_VECTORS
-                       }
+                       .kpp = __VECS(ecdh_tv_template)
                }
        }, {
                .alg = "gcm(aes)",
@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = aes_gcm_enc_tv_template,
-                                       .count = AES_GCM_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_gcm_dec_tv_template,
-                                       .count = AES_GCM_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_gcm_enc_tv_template),
+                               .dec = __VECS(aes_gcm_dec_tv_template)
                        }
                }
        }, {
@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = ghash_tv_template,
-                               .count = GHASH_TEST_VECTORS
-                       }
+                       .hash = __VECS(ghash_tv_template)
                }
        }, {
                .alg = "hmac(crc32)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = bfin_crc_tv_template,
-                               .count = BFIN_CRC_TEST_VECTORS
-                       }
+                       .hash = __VECS(bfin_crc_tv_template)
                }
        }, {
                .alg = "hmac(md5)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_md5_tv_template,
-                               .count = HMAC_MD5_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_md5_tv_template)
                }
        }, {
                .alg = "hmac(rmd128)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_rmd128_tv_template,
-                               .count = HMAC_RMD128_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_rmd128_tv_template)
                }
        }, {
                .alg = "hmac(rmd160)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_rmd160_tv_template,
-                               .count = HMAC_RMD160_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_rmd160_tv_template)
                }
        }, {
                .alg = "hmac(sha1)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha1_tv_template,
-                               .count = HMAC_SHA1_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha1_tv_template)
                }
        }, {
                .alg = "hmac(sha224)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha224_tv_template,
-                               .count = HMAC_SHA224_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha224_tv_template)
                }
        }, {
                .alg = "hmac(sha256)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha256_tv_template,
-                               .count = HMAC_SHA256_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha256_tv_template)
                }
        }, {
                .alg = "hmac(sha3-224)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha3_224_tv_template,
-                               .count = HMAC_SHA3_224_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha3_224_tv_template)
                }
        }, {
                .alg = "hmac(sha3-256)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha3_256_tv_template,
-                               .count = HMAC_SHA3_256_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha3_256_tv_template)
                }
        }, {
                .alg = "hmac(sha3-384)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha3_384_tv_template,
-                               .count = HMAC_SHA3_384_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha3_384_tv_template)
                }
        }, {
                .alg = "hmac(sha3-512)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha3_512_tv_template,
-                               .count = HMAC_SHA3_512_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha3_512_tv_template)
                }
        }, {
                .alg = "hmac(sha384)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha384_tv_template,
-                               .count = HMAC_SHA384_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha384_tv_template)
                }
        }, {
                .alg = "hmac(sha512)",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = hmac_sha512_tv_template,
-                               .count = HMAC_SHA512_TEST_VECTORS
-                       }
+                       .hash = __VECS(hmac_sha512_tv_template)
                }
        }, {
                .alg = "jitterentropy_rng",
@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_kw_enc_tv_template,
-                                       .count = ARRAY_SIZE(aes_kw_enc_tv_template)
-                               },
-                               .dec = {
-                                       .vecs = aes_kw_dec_tv_template,
-                                       .count = ARRAY_SIZE(aes_kw_dec_tv_template)
-                               }
+                               .enc = __VECS(aes_kw_enc_tv_template),
+                               .dec = __VECS(aes_kw_dec_tv_template)
                        }
                }
        }, {
@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_lrw_enc_tv_template,
-                                       .count = AES_LRW_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_lrw_dec_tv_template,
-                                       .count = AES_LRW_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_lrw_enc_tv_template),
+                               .dec = __VECS(aes_lrw_dec_tv_template)
                        }
                }
        }, {
@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = camellia_lrw_enc_tv_template,
-                                       .count = CAMELLIA_LRW_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = camellia_lrw_dec_tv_template,
-                                       .count = CAMELLIA_LRW_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(camellia_lrw_enc_tv_template),
+                               .dec = __VECS(camellia_lrw_dec_tv_template)
                        }
                }
        }, {
@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = cast6_lrw_enc_tv_template,
-                                       .count = CAST6_LRW_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = cast6_lrw_dec_tv_template,
-                                       .count = CAST6_LRW_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(cast6_lrw_enc_tv_template),
+                               .dec = __VECS(cast6_lrw_dec_tv_template)
                        }
                }
        }, {
@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = serpent_lrw_enc_tv_template,
-                                       .count = SERPENT_LRW_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = serpent_lrw_dec_tv_template,
-                                       .count = SERPENT_LRW_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(serpent_lrw_enc_tv_template),
+                               .dec = __VECS(serpent_lrw_dec_tv_template)
                        }
                }
        }, {
@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = tf_lrw_enc_tv_template,
-                                       .count = TF_LRW_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = tf_lrw_dec_tv_template,
-                                       .count = TF_LRW_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(tf_lrw_enc_tv_template),
+                               .dec = __VECS(tf_lrw_dec_tv_template)
                        }
                }
        }, {
@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
-                               .comp = {
-                                       .vecs = lz4_comp_tv_template,
-                                       .count = LZ4_COMP_TEST_VECTORS
-                               },
-                               .decomp = {
-                                       .vecs = lz4_decomp_tv_template,
-                                       .count = LZ4_DECOMP_TEST_VECTORS
-                               }
+                               .comp = __VECS(lz4_comp_tv_template),
+                               .decomp = __VECS(lz4_decomp_tv_template)
                        }
                }
        }, {
@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
-                               .comp = {
-                                       .vecs = lz4hc_comp_tv_template,
-                                       .count = LZ4HC_COMP_TEST_VECTORS
-                               },
-                               .decomp = {
-                                       .vecs = lz4hc_decomp_tv_template,
-                                       .count = LZ4HC_DECOMP_TEST_VECTORS
-                               }
+                               .comp = __VECS(lz4hc_comp_tv_template),
+                               .decomp = __VECS(lz4hc_decomp_tv_template)
                        }
                }
        }, {
@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
-                               .comp = {
-                                       .vecs = lzo_comp_tv_template,
-                                       .count = LZO_COMP_TEST_VECTORS
-                               },
-                               .decomp = {
-                                       .vecs = lzo_decomp_tv_template,
-                                       .count = LZO_DECOMP_TEST_VECTORS
-                               }
+                               .comp = __VECS(lzo_comp_tv_template),
+                               .decomp = __VECS(lzo_decomp_tv_template)
                        }
                }
        }, {
                .alg = "md4",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = md4_tv_template,
-                               .count = MD4_TEST_VECTORS
-                       }
+                       .hash = __VECS(md4_tv_template)
                }
        }, {
                .alg = "md5",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = md5_tv_template,
-                               .count = MD5_TEST_VECTORS
-                       }
+                       .hash = __VECS(md5_tv_template)
                }
        }, {
                .alg = "michael_mic",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = michael_mic_tv_template,
-                               .count = MICHAEL_MIC_TEST_VECTORS
-                       }
+                       .hash = __VECS(michael_mic_tv_template)
                }
        }, {
                .alg = "ofb(aes)",
@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_ofb_enc_tv_template,
-                                       .count = AES_OFB_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_ofb_dec_tv_template,
-                                       .count = AES_OFB_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_ofb_enc_tv_template),
+                               .dec = __VECS(aes_ofb_dec_tv_template)
                        }
                }
        }, {
@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = fcrypt_pcbc_enc_tv_template,
-                                       .count = FCRYPT_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = fcrypt_pcbc_dec_tv_template,
-                                       .count = FCRYPT_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(fcrypt_pcbc_enc_tv_template),
+                               .dec = __VECS(fcrypt_pcbc_dec_tv_template)
                        }
                }
        }, {
                .alg = "poly1305",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = poly1305_tv_template,
-                               .count = POLY1305_TEST_VECTORS
-                       }
+                       .hash = __VECS(poly1305_tv_template)
                }
        }, {
                .alg = "rfc3686(ctr(aes))",
@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_ctr_rfc3686_enc_tv_template,
-                                       .count = AES_CTR_3686_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_ctr_rfc3686_dec_tv_template,
-                                       .count = AES_CTR_3686_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
+                               .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
                        }
                }
        }, {
@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = aes_gcm_rfc4106_enc_tv_template,
-                                       .count = AES_GCM_4106_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_gcm_rfc4106_dec_tv_template,
-                                       .count = AES_GCM_4106_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
+                               .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
                        }
                }
        }, {
@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = aes_ccm_rfc4309_enc_tv_template,
-                                       .count = AES_CCM_4309_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_ccm_rfc4309_dec_tv_template,
-                                       .count = AES_CCM_4309_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
+                               .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
                        }
                }
        }, {
@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = aes_gcm_rfc4543_enc_tv_template,
-                                       .count = AES_GCM_4543_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_gcm_rfc4543_dec_tv_template,
-                                       .count = AES_GCM_4543_DEC_TEST_VECTORS
-                               },
+                               .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
+                               .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
                        }
                }
        }, {
@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = rfc7539_enc_tv_template,
-                                       .count = RFC7539_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = rfc7539_dec_tv_template,
-                                       .count = RFC7539_DEC_TEST_VECTORS
-                               },
+                               .enc = __VECS(rfc7539_enc_tv_template),
+                               .dec = __VECS(rfc7539_dec_tv_template),
                        }
                }
        }, {
@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
-                               .enc = {
-                                       .vecs = rfc7539esp_enc_tv_template,
-                                       .count = RFC7539ESP_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = rfc7539esp_dec_tv_template,
-                                       .count = RFC7539ESP_DEC_TEST_VECTORS
-                               },
+                               .enc = __VECS(rfc7539esp_enc_tv_template),
+                               .dec = __VECS(rfc7539esp_dec_tv_template),
                        }
                }
        }, {
                .alg = "rmd128",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = rmd128_tv_template,
-                               .count = RMD128_TEST_VECTORS
-                       }
+                       .hash = __VECS(rmd128_tv_template)
                }
        }, {
                .alg = "rmd160",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = rmd160_tv_template,
-                               .count = RMD160_TEST_VECTORS
-                       }
+                       .hash = __VECS(rmd160_tv_template)
                }
        }, {
                .alg = "rmd256",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = rmd256_tv_template,
-                               .count = RMD256_TEST_VECTORS
-                       }
+                       .hash = __VECS(rmd256_tv_template)
                }
        }, {
                .alg = "rmd320",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = rmd320_tv_template,
-                               .count = RMD320_TEST_VECTORS
-                       }
+                       .hash = __VECS(rmd320_tv_template)
                }
        }, {
                .alg = "rsa",
                .test = alg_test_akcipher,
                .fips_allowed = 1,
                .suite = {
-                       .akcipher = {
-                               .vecs = rsa_tv_template,
-                               .count = RSA_TEST_VECTORS
-                       }
+                       .akcipher = __VECS(rsa_tv_template)
                }
        }, {
                .alg = "salsa20",
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = salsa20_stream_enc_tv_template,
-                                       .count = SALSA20_STREAM_ENC_TEST_VECTORS
-                               }
+                               .enc = __VECS(salsa20_stream_enc_tv_template)
                        }
                }
        }, {
@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha1_tv_template,
-                               .count = SHA1_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha1_tv_template)
                }
        }, {
                .alg = "sha224",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha224_tv_template,
-                               .count = SHA224_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha224_tv_template)
                }
        }, {
                .alg = "sha256",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha256_tv_template,
-                               .count = SHA256_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha256_tv_template)
                }
        }, {
                .alg = "sha3-224",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha3_224_tv_template,
-                               .count = SHA3_224_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha3_224_tv_template)
                }
        }, {
                .alg = "sha3-256",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha3_256_tv_template,
-                               .count = SHA3_256_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha3_256_tv_template)
                }
        }, {
                .alg = "sha3-384",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha3_384_tv_template,
-                               .count = SHA3_384_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha3_384_tv_template)
                }
        }, {
                .alg = "sha3-512",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha3_512_tv_template,
-                               .count = SHA3_512_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha3_512_tv_template)
                }
        }, {
                .alg = "sha384",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha384_tv_template,
-                               .count = SHA384_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha384_tv_template)
                }
        }, {
                .alg = "sha512",
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
-                       .hash = {
-                               .vecs = sha512_tv_template,
-                               .count = SHA512_TEST_VECTORS
-                       }
+                       .hash = __VECS(sha512_tv_template)
                }
        }, {
                .alg = "tgr128",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = tgr128_tv_template,
-                               .count = TGR128_TEST_VECTORS
-                       }
+                       .hash = __VECS(tgr128_tv_template)
                }
        }, {
                .alg = "tgr160",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = tgr160_tv_template,
-                               .count = TGR160_TEST_VECTORS
-                       }
+                       .hash = __VECS(tgr160_tv_template)
                }
        }, {
                .alg = "tgr192",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = tgr192_tv_template,
-                               .count = TGR192_TEST_VECTORS
+                       .hash = __VECS(tgr192_tv_template)
+               }
+       }, {
+               .alg = "tls10(hmac(sha1),cbc(aes))",
+               .test = alg_test_tls,
+               .suite = {
+                       .tls = {
+                               .enc = __VECS(tls_enc_tv_template),
+                               .dec = __VECS(tls_dec_tv_template)
                        }
                }
        }, {
                .alg = "vmac(aes)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = aes_vmac128_tv_template,
-                               .count = VMAC_AES_TEST_VECTORS
-                       }
+                       .hash = __VECS(aes_vmac128_tv_template)
                }
        }, {
                .alg = "wp256",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = wp256_tv_template,
-                               .count = WP256_TEST_VECTORS
-                       }
+                       .hash = __VECS(wp256_tv_template)
                }
        }, {
                .alg = "wp384",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = wp384_tv_template,
-                               .count = WP384_TEST_VECTORS
-                       }
+                       .hash = __VECS(wp384_tv_template)
                }
        }, {
                .alg = "wp512",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = wp512_tv_template,
-                               .count = WP512_TEST_VECTORS
-                       }
+                       .hash = __VECS(wp512_tv_template)
                }
        }, {
                .alg = "xcbc(aes)",
                .test = alg_test_hash,
                .suite = {
-                       .hash = {
-                               .vecs = aes_xcbc128_tv_template,
-                               .count = XCBC_AES_TEST_VECTORS
-                       }
+                       .hash = __VECS(aes_xcbc128_tv_template)
                }
        }, {
                .alg = "xts(aes)",
@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = aes_xts_enc_tv_template,
-                                       .count = AES_XTS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = aes_xts_dec_tv_template,
-                                       .count = AES_XTS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(aes_xts_enc_tv_template),
+                               .dec = __VECS(aes_xts_dec_tv_template)
                        }
                }
        }, {
@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = camellia_xts_enc_tv_template,
-                                       .count = CAMELLIA_XTS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = camellia_xts_dec_tv_template,
-                                       .count = CAMELLIA_XTS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(camellia_xts_enc_tv_template),
+                               .dec = __VECS(camellia_xts_dec_tv_template)
                        }
                }
        }, {
@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = cast6_xts_enc_tv_template,
-                                       .count = CAST6_XTS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = cast6_xts_dec_tv_template,
-                                       .count = CAST6_XTS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(cast6_xts_enc_tv_template),
+                               .dec = __VECS(cast6_xts_dec_tv_template)
                        }
                }
        }, {
@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = serpent_xts_enc_tv_template,
-                                       .count = SERPENT_XTS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = serpent_xts_dec_tv_template,
-                                       .count = SERPENT_XTS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(serpent_xts_enc_tv_template),
+                               .dec = __VECS(serpent_xts_dec_tv_template)
                        }
                }
        }, {
@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
-                               .enc = {
-                                       .vecs = tf_xts_enc_tv_template,
-                                       .count = TF_XTS_ENC_TEST_VECTORS
-                               },
-                               .dec = {
-                                       .vecs = tf_xts_dec_tv_template,
-                                       .count = TF_XTS_DEC_TEST_VECTORS
-                               }
+                               .enc = __VECS(tf_xts_enc_tv_template),
+                               .dec = __VECS(tf_xts_dec_tv_template)
                        }
                }
        }
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -34,9 +34,9 @@
 
 struct hash_testvec {
        /* only used with keyed hash algorithms */
-       char *key;
-       char *plaintext;
-       char *digest;
+       const char *key;
+       const char *plaintext;
+       const char *digest;
        unsigned char tap[MAX_TAP];
        unsigned short psize;
        unsigned char np;
@@ -63,11 +63,11 @@ struct hash_testvec {
  */
 
 struct cipher_testvec {
-       char *key;
-       char *iv;
-       char *iv_out;
-       char *input;
-       char *result;
+       const char *key;
+       const char *iv;
+       const char *iv_out;
+       const char *input;
+       const char *result;
        unsigned short tap[MAX_TAP];
        int np;
        unsigned char also_non_np;
@@ -80,11 +80,11 @@ struct cipher_testvec {
 };
 
 struct aead_testvec {
-       char *key;
-       char *iv;
-       char *input;
-       char *assoc;
-       char *result;
+       const char *key;
+       const char *iv;
+       const char *input;
+       const char *assoc;
+       const char *result;
        unsigned char tap[MAX_TAP];
        unsigned char atap[MAX_TAP];
        int np;
@@ -99,10 +99,10 @@ struct aead_testvec {
 };
 
 struct cprng_testvec {
-       char *key;
-       char *dt;
-       char *v;
-       char *result;
+       const char *key;
+       const char *dt;
+       const char *v;
+       const char *result;
        unsigned char klen;
        unsigned short dtlen;
        unsigned short vlen;
@@ -111,24 +111,38 @@ struct cprng_testvec {
 };
 
 struct drbg_testvec {
-       unsigned char *entropy;
+       const unsigned char *entropy;
        size_t entropylen;
-       unsigned char *entpra;
-       unsigned char *entprb;
+       const unsigned char *entpra;
+       const unsigned char *entprb;
        size_t entprlen;
-       unsigned char *addtla;
-       unsigned char *addtlb;
+       const unsigned char *addtla;
+       const unsigned char *addtlb;
        size_t addtllen;
-       unsigned char *pers;
+       const unsigned char *pers;
        size_t perslen;
-       unsigned char *expected;
+       const unsigned char *expected;
        size_t expectedlen;
 };
 
+struct tls_testvec {
+       char *key;      /* wrapped keys for encryption and authentication */
+       char *iv;       /* initialization vector */
+       char *input;    /* input data */
+       char *assoc;    /* associated data: seq num, type, version, input len */
+       char *result;   /* result data */
+       unsigned char fail;     /* the test failure is expected */
+       unsigned char novrfy;   /* dec verification failure expected */
+       unsigned char klen;     /* key length */
+       unsigned short ilen;    /* input data length */
+       unsigned short alen;    /* associated data length */
+       unsigned short rlen;    /* result length */
+};
+
 struct akcipher_testvec {
-       unsigned char *key;
-       unsigned char *m;
-       unsigned char *c;
+       const unsigned char *key;
+       const unsigned char *m;
+       const unsigned char *c;
        unsigned int key_len;
        unsigned int m_size;
        unsigned int c_size;
@@ -136,27 +150,227 @@ struct akcipher_testvec {
 };
 
 struct kpp_testvec {
-       unsigned char *secret;
-       unsigned char *b_public;
-       unsigned char *expected_a_public;
-       unsigned char *expected_ss;
+       const unsigned char *secret;
+       const unsigned char *b_public;
+       const unsigned char *expected_a_public;
+       const unsigned char *expected_ss;
        unsigned short secret_size;
        unsigned short b_public_size;
        unsigned short expected_a_public_size;
        unsigned short expected_ss_size;
 };
 
-static char zeroed_string[48];
+static const char zeroed_string[48];
 
 /*
- * RSA test vectors. Borrowed from openSSL.
+ * TLS1.0 synthetic test vectors
  */
-#ifdef CONFIG_CRYPTO_FIPS
-#define RSA_TEST_VECTORS       2
+static struct tls_testvec tls_enc_tv_template[] = {
+       {
+#ifdef __LITTLE_ENDIAN
+               .key    = "\x08\x00"            /* rta length */
+                       "\x01\x00"              /* rta type */
+#else
+               .key    = "\x00\x08"            /* rta length */
+                       "\x00\x01"              /* rta type */
+#endif
+                       "\x00\x00\x00\x10"      /* enc key length */
+                       "authenticationkey20benckeyis16_bytes",
+               .klen   = 8 + 20 + 16,
+               .iv     = "iv0123456789abcd",
+               .input  = "Single block msg",
+               .ilen   = 16,
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                       "\x00\x03\x01\x00\x10",
+               .alen   = 13,
+               .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
+                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
+                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
+                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
+                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
+                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
+               .rlen   = 16 + 20 + 12,
+       }, {
+#ifdef __LITTLE_ENDIAN
+               .key    = "\x08\x00"            /* rta length */
+                       "\x01\x00"              /* rta type */
+#else
+               .key    = "\x00\x08"            /* rta length */
+                       "\x00\x01"              /* rta type */
+#endif
+                       "\x00\x00\x00\x10"      /* enc key length */
+                       "authenticationkey20benckeyis16_bytes",
+               .klen   = 8 + 20 + 16,
+               .iv     = "iv0123456789abcd",
+               .input  = "",
+               .ilen   = 0,
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                       "\x00\x03\x01\x00\x00",
+               .alen   = 13,
+               .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
+                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
+                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
+                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
+               .rlen   = 20 + 12,
+       }, {
+#ifdef __LITTLE_ENDIAN
+               .key    = "\x08\x00"            /* rta length */
+                       "\x01\x00"              /* rta type */
+#else
+               .key    = "\x00\x08"            /* rta length */
+                       "\x00\x01"              /* rta type */
+#endif
+                       "\x00\x00\x00\x10"      /* enc key length */
+                       "authenticationkey20benckeyis16_bytes",
+               .klen   = 8 + 20 + 16,
+               .iv     = "iv0123456789abcd",
+               .input  = "285 bytes plaintext285 bytes plaintext285 bytes"
+                       " plaintext285 bytes plaintext285 bytes plaintext285"
+                       " bytes plaintext285 bytes plaintext285 bytes"
+                       " plaintext285 bytes plaintext285 bytes plaintext285"
+                       " bytes plaintext285 bytes plaintext285 bytes"
+                       " plaintext285 bytes plaintext285 bytes plaintext285"
3910 +                       " bytes plaintext285 bytes plaintext",
3911 +               .ilen   = 285,
3912 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3913 +                       "\x00\x03\x01\x01\x1d",
3914 +               .alen   = 13,
3915 +               .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3916 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3917 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3918 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3919 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3920 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3921 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3922 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3923 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3924 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3925 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3926 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3927 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3928 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3929 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3930 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3931 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3932 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3933 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3934 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3935 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3936 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3937 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3938 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3939 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3940 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3941 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3942 +               .rlen   = 285 + 20 + 15,
3943 +       }
3944 +};
3945 +
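The .rlen values above can be cross-checked: a TLS 1.0 CBC record is the plaintext plus the 20-byte HMAC-SHA1 digest, padded up to the next multiple of the 16-byte AES block size, and TLS always adds at least one padding byte. A hypothetical helper capturing the arithmetic (not part of the patch):

        static inline unsigned int tls_cbc_rlen(unsigned int ilen)
        {
                unsigned int payload = ilen + 20;      /* plaintext + MAC */

                return payload + 16 - payload % 16;    /* 1..16 pad bytes */
        }
        /* 16 -> 48 (= 16 + 20 + 12), 0 -> 32 (= 20 + 12),
         * 285 -> 320 (= 285 + 20 + 15), matching the vectors above */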
3946 +static struct tls_testvec tls_dec_tv_template[] = {
3947 +       {
3948 +#ifdef __LITTLE_ENDIAN
3949 +               .key    = "\x08\x00"            /* rta length */
3950 +                       "\x01\x00"              /* rta type */
3951 +#else
3952 +               .key    = "\x00\x08"            /* rta length */
3953 +                       "\x00\x01"              /* rta type */
3954 +#endif
3955 +                       "\x00\x00\x00\x10"      /* enc key length */
3956 +                       "authenticationkey20benckeyis16_bytes",
3957 +               .klen   = 8 + 20 + 16,
3958 +               .iv     = "iv0123456789abcd",
3959 +               .input  = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3960 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3961 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3962 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3963 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3964 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3965 +               .ilen   = 16 + 20 + 12,
3966 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3967 +                       "\x00\x03\x01\x00\x30",
3968 +               .alen   = 13,
3969 +               .result = "Single block msg",
3970 +               .rlen   = 16,
3971 +       }, {
3972 +#ifdef __LITTLE_ENDIAN
3973 +               .key    = "\x08\x00"            /* rta length */
3974 +                       "\x01\x00"              /* rta type */
3975  #else
3976 -#define RSA_TEST_VECTORS       5
3977 +               .key    = "\x00\x08"            /* rta length */
3978 +                       "\x00\x01"              /* rta type */
3979  #endif
3980 -static struct akcipher_testvec rsa_tv_template[] = {
3981 +                       "\x00\x00\x00\x10"      /* enc key length */
3982 +                       "authenticationkey20benckeyis16_bytes",
3983 +               .klen   = 8 + 20 + 16,
3984 +               .iv     = "iv0123456789abcd",
3985 +               .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3986 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3987 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3988 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3989 +               .ilen   = 20 + 12,
3990 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3991 +                       "\x00\x03\x01\x00\x20",
3992 +               .alen   = 13,
3993 +               .result = "",
3994 +               .rlen   = 0,
3995 +       }, {
3996 +#ifdef __LITTLE_ENDIAN
3997 +               .key    = "\x08\x00"            /* rta length */
3998 +                       "\x01\x00"              /* rta type */
3999 +#else
4000 +               .key    = "\x00\x08"            /* rta length */
4001 +                       "\x00\x01"              /* rta type */
4002 +#endif
4003 +                       "\x00\x00\x00\x10"      /* enc key length */
4004 +                       "authenticationkey20benckeyis16_bytes",
4005 +               .klen   = 8 + 20 + 16,
4006 +               .iv     = "iv0123456789abcd",
4007 +               .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4008 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4009 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4010 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4011 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4012 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4013 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4014 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4015 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4016 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4017 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4018 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4019 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4020 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4021 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4022 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4023 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4024 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4025 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4026 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4027 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4028 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4029 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4030 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4031 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4032 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4033 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4034 +
4035 +               .ilen   = 285 + 20 + 15,
4036 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
4037 +                       "\x00\x03\x01\x01\x40",
4038 +               .alen   = 13,
4039 +               .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4040 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4041 +                       " bytes plaintext285 bytes plaintext285 bytes"
4042 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4043 +                       " bytes plaintext285 bytes plaintext285 bytes"
4044 +                       " plaintext285 bytes plaintext285 bytes plaintext",
4045 +               .rlen   = 285,
4046 +       }
4047 +};
4048 +
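Note that the decryption templates carry the ciphertext length in the trailing bytes of .assoc, matching each entry's .ilen: "\x00\x30" = 48 = 16 + 20 + 12, "\x00\x20" = 32 = 20 + 12, and "\x01\x40" = 320 = 285 + 20 + 15. The encryption templates above instead carry the plaintext lengths 16, 0 and 285 ("\x00\x10", "\x00\x00", "\x01\x1d").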
4049 +/*
4050 + * RSA test vectors. Borrowed from OpenSSL.
4051 + */
4052 +static const struct akcipher_testvec rsa_tv_template[] = {
4053         {
4054  #ifndef CONFIG_CRYPTO_FIPS
4055         .key =
4056 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4057         .m_size = 8,
4058         .c_size = 256,
4059         .public_key_vec = true,
4060 +#ifndef CONFIG_CRYPTO_FIPS
4061         }, {
4062         .key =
4063         "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4064 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4065         .key_len = 2349,
4066         .m_size = 8,
4067         .c_size = 512,
4068 +#endif
4069         }
4070  };
4071  
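The *_TEST_VECTORS count macros are dropped throughout this file because testmgr.c now derives the counts from the template arrays themselves; FIPS-only exclusions therefore move inside the arrays as #ifndef CONFIG_CRYPTO_FIPS blocks, as in rsa_tv_template above. The counting convention in testmgr.c is, in sketch form:

        #define __VECS(tv)      { .vecs = tv, .count = ARRAY_SIZE(tv) }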
4072 -#define DH_TEST_VECTORS 2
4073 -
4074 -struct kpp_testvec dh_tv_template[] = {
4075 +static const struct kpp_testvec dh_tv_template[] = {
4076         {
4077         .secret =
4078  #ifdef __LITTLE_ENDIAN
4079 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4080         }
4081  };
4082  
4083 -#ifdef CONFIG_CRYPTO_FIPS
4084 -#define ECDH_TEST_VECTORS 1
4085 -#else
4086 -#define ECDH_TEST_VECTORS 2
4087 -#endif
4088 -struct kpp_testvec ecdh_tv_template[] = {
4089 +static const struct kpp_testvec ecdh_tv_template[] = {
4090         {
4091  #ifndef CONFIG_CRYPTO_FIPS
4092         .secret =
4093 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4094  /*
4095   * MD4 test vectors from RFC1320
4096   */
4097 -#define MD4_TEST_VECTORS       7
4098 -
4099 -static struct hash_testvec md4_tv_template [] = {
4100 +static const struct hash_testvec md4_tv_template[] = {
4101         {
4102                 .plaintext = "",
4103                 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4104 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4105         },
4106  };
4107  
4108 -#define SHA3_224_TEST_VECTORS  3
4109 -static struct hash_testvec sha3_224_tv_template[] = {
4110 +static const struct hash_testvec sha3_224_tv_template[] = {
4111         {
4112                 .plaintext = "",
4113                 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4114 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4115         },
4116  };
4117  
4118 -#define SHA3_256_TEST_VECTORS  3
4119 -static struct hash_testvec sha3_256_tv_template[] = {
4120 +static const struct hash_testvec sha3_256_tv_template[] = {
4121         {
4122                 .plaintext = "",
4123                 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4124 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4125  };
4126  
4127  
4128 -#define SHA3_384_TEST_VECTORS  3
4129 -static struct hash_testvec sha3_384_tv_template[] = {
4130 +static const struct hash_testvec sha3_384_tv_template[] = {
4131         {
4132                 .plaintext = "",
4133                 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4134 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4135  };
4136  
4137  
4138 -#define SHA3_512_TEST_VECTORS  3
4139 -static struct hash_testvec sha3_512_tv_template[] = {
4140 +static const struct hash_testvec sha3_512_tv_template[] = {
4141         {
4142                 .plaintext = "",
4143                 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4144 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4145  /*
4146   * MD5 test vectors from RFC1321
4147   */
4148 -#define MD5_TEST_VECTORS       7
4149 -
4150 -static struct hash_testvec md5_tv_template[] = {
4151 +static const struct hash_testvec md5_tv_template[] = {
4152         {
4153                 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4154                           "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4155 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4156  /*
4157   * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4158   */
4159 -#define RMD128_TEST_VECTORS     10
4160 -
4161 -static struct hash_testvec rmd128_tv_template[] = {
4162 +static const struct hash_testvec rmd128_tv_template[] = {
4163         {
4164                 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4165                           "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4166 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4167  /*
4168   * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4169   */
4170 -#define RMD160_TEST_VECTORS     10
4171 -
4172 -static struct hash_testvec rmd160_tv_template[] = {
4173 +static const struct hash_testvec rmd160_tv_template[] = {
4174         {
4175                 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4176                           "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4177 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4178  /*
4179   * RIPEMD-256 test vectors
4180   */
4181 -#define RMD256_TEST_VECTORS     8
4182 -
4183 -static struct hash_testvec rmd256_tv_template[] = {
4184 +static const struct hash_testvec rmd256_tv_template[] = {
4185         {
4186                 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4187                           "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4188 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4189  /*
4190   * RIPEMD-320 test vectors
4191   */
4192 -#define RMD320_TEST_VECTORS     8
4193 -
4194 -static struct hash_testvec rmd320_tv_template[] = {
4195 +static const struct hash_testvec rmd320_tv_template[] = {
4196         {
4197                 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4198                           "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4199 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4200         }
4201  };
4202  
4203 -#define CRCT10DIF_TEST_VECTORS 3
4204 -static struct hash_testvec crct10dif_tv_template[] = {
4205 +static const struct hash_testvec crct10dif_tv_template[] = {
4206         {
4207 -               .plaintext = "abc",
4208 -               .psize  = 3,
4209 -#ifdef __LITTLE_ENDIAN
4210 -               .digest = "\x3b\x44",
4211 -#else
4212 -               .digest = "\x44\x3b",
4213 -#endif
4214 -       }, {
4215 -               .plaintext = "1234567890123456789012345678901234567890"
4216 -                            "123456789012345678901234567890123456789",
4217 -               .psize  = 79,
4218 -#ifdef __LITTLE_ENDIAN
4219 -               .digest = "\x70\x4b",
4220 -#else
4221 -               .digest = "\x4b\x70",
4222 -#endif
4223 -       }, {
4224 -               .plaintext =
4225 -               "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4226 -               .psize  = 56,
4227 -#ifdef __LITTLE_ENDIAN
4228 -               .digest = "\xe3\x9c",
4229 -#else
4230 -               .digest = "\x9c\xe3",
4231 -#endif
4232 -               .np     = 2,
4233 -               .tap    = { 28, 28 }
4234 +               .plaintext      = "abc",
4235 +               .psize          = 3,
4236 +               .digest         = (u8 *)(u16 []){ 0x443b },
4237 +       }, {
4238 +               .plaintext      = "1234567890123456789012345678901234567890"
4239 +                                 "123456789012345678901234567890123456789",
4240 +               .psize          = 79,
4241 +               .digest         = (u8 *)(u16 []){ 0x4b70 },
4242 +               .np             = 2,
4243 +               .tap            = { 63, 16 },
4244 +       }, {
4245 +               .plaintext      = "abcdddddddddddddddddddddddddddddddddddddddd"
4246 +                                 "ddddddddddddd",
4247 +               .psize          = 56,
4248 +               .digest         = (u8 *)(u16 []){ 0x9ce3 },
4249 +               .np             = 8,
4250 +               .tap            = { 1, 2, 28, 7, 6, 5, 4, 3 },
4251 +       }, {
4252 +               .plaintext      = "1234567890123456789012345678901234567890"
4253 +                                 "1234567890123456789012345678901234567890"
4254 +                                 "1234567890123456789012345678901234567890"
4255 +                                 "1234567890123456789012345678901234567890"
4256 +                                 "1234567890123456789012345678901234567890"
4257 +                                 "1234567890123456789012345678901234567890"
4258 +                                 "1234567890123456789012345678901234567890"
4259 +                                 "123456789012345678901234567890123456789",
4260 +               .psize          = 319,
4261 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4262 +       }, {
4263 +               .plaintext      = "1234567890123456789012345678901234567890"
4264 +                                 "1234567890123456789012345678901234567890"
4265 +                                 "1234567890123456789012345678901234567890"
4266 +                                 "1234567890123456789012345678901234567890"
4267 +                                 "1234567890123456789012345678901234567890"
4268 +                                 "1234567890123456789012345678901234567890"
4269 +                                 "1234567890123456789012345678901234567890"
4270 +                                 "123456789012345678901234567890123456789",
4271 +               .psize          = 319,
4272 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4273 +               .np             = 4,
4274 +               .tap            = { 1, 255, 57, 6 },
4275         }
4276  };
4277  
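The rewritten CRC-T10DIF vectors store the 16-bit digest through a u16 compound literal in host byte order, which is how the crct10dif implementations emit it, so the per-endianness #ifdef pairs become unnecessary. The .tap arrays split .psize across scatterlist chunks and must sum to it (63 + 16 = 79, 1 + 2 + 28 + 7 + 6 + 5 + 4 + 3 = 56, 1 + 255 + 57 + 6 = 319). A minimal illustration of the digest form:

        const u8 *digest = (const u8 *)(u16 []){ 0x443b };  /* CRC-T10DIF("abc") */
        /* little endian: digest[0] == 0x3b; big endian: digest[0] == 0x44 */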
4278 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4279   * SHA1 test vectors from FIPS PUB 180-1
4280   * Long vector from CAVS 5.0
4281   */
4282 -#define SHA1_TEST_VECTORS      6
4283 -
4284 -static struct hash_testvec sha1_tv_template[] = {
4285 +static const struct hash_testvec sha1_tv_template[] = {
4286         {
4287                 .plaintext = "",
4288                 .psize  = 0,
4289 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4290  /*
4291   * SHA224 test vectors from from FIPS PUB 180-2
4292   * SHA224 test vectors from FIPS PUB 180-2
4293 -#define SHA224_TEST_VECTORS     5
4294 -
4295 -static struct hash_testvec sha224_tv_template[] = {
4296 +static const struct hash_testvec sha224_tv_template[] = {
4297         {
4298                 .plaintext = "",
4299                 .psize  = 0,
4300 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4301  /*
4302   * SHA256 test vectors from NIST
4303   */
4304 -#define SHA256_TEST_VECTORS    5
4305 -
4306 -static struct hash_testvec sha256_tv_template[] = {
4307 +static const struct hash_testvec sha256_tv_template[] = {
4308         {
4309                 .plaintext = "",
4310                 .psize  = 0,
4311 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4312  /*
4313   * SHA384 test vectors from NIST and kerneli
4314   */
4315 -#define SHA384_TEST_VECTORS    6
4316 -
4317 -static struct hash_testvec sha384_tv_template[] = {
4318 +static const struct hash_testvec sha384_tv_template[] = {
4319         {
4320                 .plaintext = "",
4321                 .psize  = 0,
4322 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4323  /*
4324   * SHA512 test vectors from from NIST and kerneli
4325   * SHA512 test vectors from NIST and kerneli
4326 -#define SHA512_TEST_VECTORS    6
4327 -
4328 -static struct hash_testvec sha512_tv_template[] = {
4329 +static const struct hash_testvec sha512_tv_template[] = {
4330         {
4331                 .plaintext = "",
4332                 .psize  = 0,
4333 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4334   * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4335   * submission
4336   */
4337 -#define WP512_TEST_VECTORS     8
4338 -
4339 -static struct hash_testvec wp512_tv_template[] = {
4340 +static const struct hash_testvec wp512_tv_template[] = {
4341         {
4342                 .plaintext = "",
4343                 .psize  = 0,
4344 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4345         },
4346  };
4347  
4348 -#define WP384_TEST_VECTORS     8
4349 -
4350 -static struct hash_testvec wp384_tv_template[] = {
4351 +static const struct hash_testvec wp384_tv_template[] = {
4352         {
4353                 .plaintext = "",
4354                 .psize  = 0,
4355 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4356         },
4357  };
4358  
4359 -#define WP256_TEST_VECTORS     8
4360 -
4361 -static struct hash_testvec wp256_tv_template[] = {
4362 +static const struct hash_testvec wp256_tv_template[] = {
4363         {
4364                 .plaintext = "",
4365                 .psize  = 0,
4366 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4367  /*
4368   * TIGER test vectors from Tiger website
4369   */
4370 -#define TGR192_TEST_VECTORS    6
4371 -
4372 -static struct hash_testvec tgr192_tv_template[] = {
4373 +static const struct hash_testvec tgr192_tv_template[] = {
4374         {
4375                 .plaintext = "",
4376                 .psize  = 0,
4377 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4378         },
4379  };
4380  
4381 -#define TGR160_TEST_VECTORS    6
4382 -
4383 -static struct hash_testvec tgr160_tv_template[] = {
4384 +static const struct hash_testvec tgr160_tv_template[] = {
4385         {
4386                 .plaintext = "",
4387                 .psize  = 0,
4388 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4389         },
4390  };
4391  
4392 -#define TGR128_TEST_VECTORS    6
4393 -
4394 -static struct hash_testvec tgr128_tv_template[] = {
4395 +static const struct hash_testvec tgr128_tv_template[] = {
4396         {
4397                 .plaintext = "",
4398                 .psize  = 0,
4399 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4400         },
4401  };
4402  
4403 -#define GHASH_TEST_VECTORS 6
4404 -
4405 -static struct hash_testvec ghash_tv_template[] =
4406 +static const struct hash_testvec ghash_tv_template[] =
4407  {
4408         {
4409                 .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4410 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4411   * HMAC-MD5 test vectors from RFC2202
4412   * (These need to be fixed to not use strlen).
4413   */
4414 -#define HMAC_MD5_TEST_VECTORS  7
4415 -
4416 -static struct hash_testvec hmac_md5_tv_template[] =
4417 +static const struct hash_testvec hmac_md5_tv_template[] =
4418  {
4419         {
4420                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4421 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4422  /*
4423   * HMAC-RIPEMD128 test vectors from RFC2286
4424   */
4425 -#define HMAC_RMD128_TEST_VECTORS       7
4426 -
4427 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4428 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4429         {
4430                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4431                 .ksize  = 16,
4432 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4433  /*
4434   * HMAC-RIPEMD160 test vectors from RFC2286
4435   */
4436 -#define HMAC_RMD160_TEST_VECTORS       7
4437 -
4438 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4439 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4440         {
4441                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4442                 .ksize  = 20,
4443 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4444  /*
4445   * HMAC-SHA1 test vectors from RFC2202
4446   */
4447 -#define HMAC_SHA1_TEST_VECTORS 7
4448 -
4449 -static struct hash_testvec hmac_sha1_tv_template[] = {
4450 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4451         {
4452                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4453                 .ksize  = 20,
4454 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4455  /*
4456   * SHA224 HMAC test vectors from RFC4231
4457   */
4458 -#define HMAC_SHA224_TEST_VECTORS    4
4459 -
4460 -static struct hash_testvec hmac_sha224_tv_template[] = {
4461 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4462         {
4463                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4464                         "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4465 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4466   * HMAC-SHA256 test vectors from
4467   * draft-ietf-ipsec-ciph-sha-256-01.txt
4468   */
4469 -#define HMAC_SHA256_TEST_VECTORS       10
4470 -
4471 -static struct hash_testvec hmac_sha256_tv_template[] = {
4472 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4473         {
4474                 .key    = "\x01\x02\x03\x04\x05\x06\x07\x08"
4475                           "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4476 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4477         },
4478  };
4479  
4480 -#define CMAC_AES_TEST_VECTORS 6
4481 -
4482 -static struct hash_testvec aes_cmac128_tv_template[] = {
4483 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4484         { /* From NIST Special Publication 800-38B, AES-128 */
4485                 .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4486                                   "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4487 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4488         }
4489  };
4490  
4491 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4492 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4493 +       {
4494 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4495 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4496 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4497 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4498 +               .digest         = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4499 +                                 "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4500 +               .psize          = 16,
4501 +               .ksize          = 16,
4502 +       }, {
4503 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4504 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4505 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4506 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4507 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4508 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4509 +                                 "\x30",
4510 +               .digest         = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4511 +                                 "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4512 +               .psize          = 33,
4513 +               .ksize          = 16,
4514 +               .np             = 2,
4515 +               .tap            = { 7, 26 },
4516 +       }, {
4517 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4518 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4519 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4520 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4521 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4522 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4523 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4524 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4525 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4526 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37",
4527 +               .digest         = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4528 +                                 "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4529 +               .psize          = 63,
4530 +               .ksize          = 16,
4531 +       }, {
4532 +               .key            = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4533 +                                 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4534 +                                 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4535 +                                 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4536 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4537 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4538 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4539 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4540 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4541 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4542 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4543 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4544 +                                 "\x1c",
4545 +               .digest         = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4546 +                                 "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4547 +               .psize          = 65,
4548 +               .ksize          = 32,
4549 +       }
4550 +};
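The new cbcmac(aes) vectors reuse the NIST SP 800-38B/800-38A messages, but the digests are plain CBC-MAC over a zero IV (no CMAC subkey step), and the 33- and 65-byte cases exercise a partial final block. For a single 16-byte block, CBC-MAC reduces to one AES encryption, which is why the first digest above equals the familiar SP 800-38A AES-128 ciphertext of that block. In sketch form, for full blocks only (aes_encrypt_block is a hypothetical stand-in):

        memset(mac, 0, 16);
        for (off = 0; off + 16 <= psize; off += 16) {
                crypto_xor(mac, plaintext + off, 16);   /* CBC chaining     */
                aes_encrypt_block(ctx, mac, mac);       /* E_K(mac ^ block) */
        }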
4551  
4552 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4553 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4554  /*
4555   * From NIST Special Publication 800-38B, Three Key TDEA
4556   * Corrected test vectors from:
4557 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4558         }
4559  };
4560  
4561 -#define XCBC_AES_TEST_VECTORS 6
4562 -
4563 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4564 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4565         {
4566                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4567                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4568 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4569         }
4570  };
4571  
4572 -#define VMAC_AES_TEST_VECTORS  11
4573 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4574 -                               '\x02', '\x03', '\x02', '\x02',
4575 -                               '\x02', '\x04', '\x01', '\x07',
4576 -                               '\x04', '\x01', '\x04', '\x03',};
4577 -static char vmac_string2[128] = {'a', 'b', 'c',};
4578 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4579 -                               'a', 'b', 'c', 'a', 'b', 'c',
4580 -                               'a', 'b', 'c', 'a', 'b', 'c',
4581 -                               'a', 'b', 'c', 'a', 'b', 'c',
4582 -                               'a', 'b', 'c', 'a', 'b', 'c',
4583 -                               'a', 'b', 'c', 'a', 'b', 'c',
4584 -                               'a', 'b', 'c', 'a', 'b', 'c',
4585 -                               'a', 'b', 'c', 'a', 'b', 'c',
4586 -                               };
4587 -
4588 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4589 -                               'i', 'j', 'l', 'm',
4590 -                               'o', 'p', 'r', 's',
4591 -                               't', 'u', 'w', 'x', 'z'};
4592 -
4593 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4594 -                                'o', 'l', 'k', ']', '%',
4595 -                                '9', '2', '7', '!', 'A'};
4596 -
4597 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4598 -                                'i', '!', '#', 'w', '0',
4599 -                                'z', '/', '4', 'A', 'n'};
4600 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4601 +                                      '\x02', '\x03', '\x02', '\x02',
4602 +                                      '\x02', '\x04', '\x01', '\x07',
4603 +                                      '\x04', '\x01', '\x04', '\x03',};
4604 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4605 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4606 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4607 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4608 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4609 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4610 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4611 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4612 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4613 +                                     };
4614 +
4615 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4616 +                                     'i', 'j', 'l', 'm',
4617 +                                     'o', 'p', 'r', 's',
4618 +                                     't', 'u', 'w', 'x', 'z'};
4619 +
4620 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4621 +                                      'o', 'l', 'k', ']', '%',
4622 +                                      '9', '2', '7', '!', 'A'};
4623 +
4624 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4625 +                                      'i', '!', '#', 'w', '0',
4626 +                                      'z', '/', '4', 'A', 'n'};
4627  
4628 -static struct hash_testvec aes_vmac128_tv_template[] = {
4629 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4630         {
4631                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4632                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4633 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4634   * SHA384 HMAC test vectors from RFC4231
4635   */
4636  
4637 -#define HMAC_SHA384_TEST_VECTORS       4
4638 -
4639 -static struct hash_testvec hmac_sha384_tv_template[] = {
4640 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4641         {
4642                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4643                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4644 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4645   * SHA512 HMAC test vectors from RFC4231
4646   */
4647  
4648 -#define HMAC_SHA512_TEST_VECTORS       4
4649 -
4650 -static struct hash_testvec hmac_sha512_tv_template[] = {
4651 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4652         {
4653                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4654                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4655 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4656         },
4657  };
4658  
4659 -#define HMAC_SHA3_224_TEST_VECTORS     4
4660 -
4661 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4662 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4663         {
4664                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4665                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4666 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4667         },
4668  };
4669  
4670 -#define HMAC_SHA3_256_TEST_VECTORS     4
4671 -
4672 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4673 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4674         {
4675                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4676                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4677 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4678         },
4679  };
4680  
4681 -#define HMAC_SHA3_384_TEST_VECTORS     4
4682 -
4683 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4684 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4685         {
4686                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4687                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4688 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4689         },
4690  };
4691  
4692 -#define HMAC_SHA3_512_TEST_VECTORS     4
4693 -
4694 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4695 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4696         {
4697                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4698                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4699 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4700   * Poly1305 test vectors from RFC7539 A.3.
4701   */
4702  
4703 -#define POLY1305_TEST_VECTORS  11
4704 -
4705 -static struct hash_testvec poly1305_tv_template[] = {
4706 +static const struct hash_testvec poly1305_tv_template[] = {
4707         { /* Test Vector #1 */
4708                 .plaintext      = "\x00\x00\x00\x00\x00\x00\x00\x00"
4709                                   "\x00\x00\x00\x00\x00\x00\x00\x00"
4710 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t
4711  /*
4712   * DES test vectors.
4713   */
4714 -#define DES_ENC_TEST_VECTORS           11
4715 -#define DES_DEC_TEST_VECTORS           5
4716 -#define DES_CBC_ENC_TEST_VECTORS       6
4717 -#define DES_CBC_DEC_TEST_VECTORS       5
4718 -#define DES_CTR_ENC_TEST_VECTORS       2
4719 -#define DES_CTR_DEC_TEST_VECTORS       2
4720 -#define DES3_EDE_ENC_TEST_VECTORS      4
4721 -#define DES3_EDE_DEC_TEST_VECTORS      4
4722 -#define DES3_EDE_CBC_ENC_TEST_VECTORS  2
4723 -#define DES3_EDE_CBC_DEC_TEST_VECTORS  2
4724 -#define DES3_EDE_CTR_ENC_TEST_VECTORS  2
4725 -#define DES3_EDE_CTR_DEC_TEST_VECTORS  2
4726 -
4727 -static struct cipher_testvec des_enc_tv_template[] = {
4728 +static const struct cipher_testvec des_enc_tv_template[] = {
4729         { /* From Applied Cryptography */
4730                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4731                 .klen   = 8,
4732 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_
4733         },
4734  };
4735  
4736 -static struct cipher_testvec des_dec_tv_template[] = {
4737 +static const struct cipher_testvec des_dec_tv_template[] = {
4738         { /* From Applied Cryptography */
4739                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4740                 .klen   = 8,
4741 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_
4742         },
4743  };
4744  
4745 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4746 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4747         { /* From OpenSSL */
4748                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4749                 .klen   = 8,
4750 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc
4751         },
4752  };
4753  
4754 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4755 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4756         { /* FIPS Pub 81 */
4757                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4758                 .klen   = 8,
4759 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec
4760         },
4761  };
4762  
4763 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4764 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4765         { /* Generated with Crypto++ */
4766                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4767                 .klen   = 8,
4768 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc
4769         },
4770  };
4771  
4772 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4773 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4774         { /* Generated with Crypto++ */
4775                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4776                 .klen   = 8,
4777 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec
4778         },
4779  };
4780  
4781 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4782 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4783         { /* These are from openssl */
4784                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4785                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4786 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en
4787         },
4788  };
4789  
4790 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4791 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4792         { /* These are from openssl */
4793                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4794                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4795 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de
4796         },
4797  };
4798  
4799 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4800 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4801         { /* Generated from openssl */
4802                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4803                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4804 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb
4805         },
4806  };
4807  
4808 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4809 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4810         { /* Generated from openssl */
4811                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4812                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4813 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb
4814         },
4815  };
4816  
4817 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4818 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4819         { /* Generated with Crypto++ */
4820                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4821                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4822 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct
4823         },
4824  };
4825  
4826 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4827 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4828         { /* Generated with Crypto++ */
4829                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4830                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4831 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct
4832  /*
4833   * Blowfish test vectors.
4834   */
4835 -#define BF_ENC_TEST_VECTORS    7
4836 -#define BF_DEC_TEST_VECTORS    7
4837 -#define BF_CBC_ENC_TEST_VECTORS        2
4838 -#define BF_CBC_DEC_TEST_VECTORS        2
4839 -#define BF_CTR_ENC_TEST_VECTORS        2
4840 -#define BF_CTR_DEC_TEST_VECTORS        2
4841 -
4842 -static struct cipher_testvec bf_enc_tv_template[] = {
4843 +static const struct cipher_testvec bf_enc_tv_template[] = {
4844         { /* DES test vectors from OpenSSL */
4845                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4846                 .klen   = 8,
4847 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t
4848         },
4849  };
4850  
4851 -static struct cipher_testvec bf_dec_tv_template[] = {
4852 +static const struct cipher_testvec bf_dec_tv_template[] = {
4853         { /* DES test vectors from OpenSSL */
4854                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4855                 .klen   = 8,
4856 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t
4857         },
4858  };
4859  
4860 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4861 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4862         { /* From OpenSSL */
4863                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4864                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4865 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_
4866         },
4867  };
4868  
4869 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4870 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4871         { /* From OpenSSL */
4872                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4873                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4874 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_
4875         },
4876  };
4877  
4878 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4879 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4880         { /* Generated with Crypto++ */
4881                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4882                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4883 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_
4884         },
4885  };
4886  
4887 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4888 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4889         { /* Generated with Crypto++ */
4890                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4891                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4892 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_
4893  /*
4894   * Twofish test vectors.
4895   */
4896 -#define TF_ENC_TEST_VECTORS            4
4897 -#define TF_DEC_TEST_VECTORS            4
4898 -#define TF_CBC_ENC_TEST_VECTORS                5
4899 -#define TF_CBC_DEC_TEST_VECTORS                5
4900 -#define TF_CTR_ENC_TEST_VECTORS                2
4901 -#define TF_CTR_DEC_TEST_VECTORS                2
4902 -#define TF_LRW_ENC_TEST_VECTORS                8
4903 -#define TF_LRW_DEC_TEST_VECTORS                8
4904 -#define TF_XTS_ENC_TEST_VECTORS                5
4905 -#define TF_XTS_DEC_TEST_VECTORS                5
4906 -
4907 -static struct cipher_testvec tf_enc_tv_template[] = {
4908 +static const struct cipher_testvec tf_enc_tv_template[] = {
4909         {
4910                 .key    = zeroed_string,
4911                 .klen   = 16,
4912 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t
4913         },
4914  };
4915  
4916 -static struct cipher_testvec tf_dec_tv_template[] = {
4917 +static const struct cipher_testvec tf_dec_tv_template[] = {
4918         {
4919                 .key    = zeroed_string,
4920                 .klen   = 16,
4921 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t
4922         },
4923  };
4924  
4925 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4926 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4927         { /* Generated with Nettle */
4928                 .key    = zeroed_string,
4929                 .klen   = 16,
4930 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_
4931         },
4932  };
4933  
4934 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4935 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4936         { /* Reverse of the first four above */
4937                 .key    = zeroed_string,
4938                 .klen   = 16,
4939 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_
4940         },
4941  };
4942  
4943 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4944 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4945         { /* Generated with Crypto++ */
4946                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4947                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4948 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_
4949         },
4950  };
4951  
4952 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4953 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4954         { /* Generated with Crypto++ */
4955                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4956                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4957 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_
4958         },
4959  };
4960  
4961 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4962 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4963         /* Generated from AES-LRW test vectors */
4964         {
4965                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4966 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_
4967         },
4968  };
4969  
4970 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4971 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
4972         /* Generated from AES-LRW test vectors */
4973         /* same as enc vectors with input and result reversed */
4974         {
4975 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_
4976         },
4977  };
4978  
4979 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
4980 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
4981         /* Generated from AES-XTS test vectors */
4982  {
4983                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
4984 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_
4985         },
4986  };
4987  
4988 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
4989 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
4990         /* Generated from AES-XTS test vectors */
4991         /* same as enc vectors with input and result reversed */
4992         {
4993 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_
4994   * Serpent test vectors.  These are backwards because Serpent writes
4995   * octet sequences in right-to-left mode.
4996   */
4997 -#define SERPENT_ENC_TEST_VECTORS       5
4998 -#define SERPENT_DEC_TEST_VECTORS       5
4999 -
5000 -#define TNEPRES_ENC_TEST_VECTORS       4
5001 -#define TNEPRES_DEC_TEST_VECTORS       4
5002 -
5003 -#define SERPENT_CBC_ENC_TEST_VECTORS   1
5004 -#define SERPENT_CBC_DEC_TEST_VECTORS   1
5005 -
5006 -#define SERPENT_CTR_ENC_TEST_VECTORS   2
5007 -#define SERPENT_CTR_DEC_TEST_VECTORS   2
5008 -
5009 -#define SERPENT_LRW_ENC_TEST_VECTORS   8
5010 -#define SERPENT_LRW_DEC_TEST_VECTORS   8
5011 -
5012 -#define SERPENT_XTS_ENC_TEST_VECTORS   5
5013 -#define SERPENT_XTS_DEC_TEST_VECTORS   5
5014 -
5015 -static struct cipher_testvec serpent_enc_tv_template[] = {
5016 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5017         {
5018                 .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
5019                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5020 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc
5021         },
5022  };
5023  
5024 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5025 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5026         { /* KeySize=128, PT=0, I=1 */
5027                 .input  = "\x00\x00\x00\x00\x00\x00\x00\x00"
5028                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5029 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc
5030  };
5031  
5032  
5033 -static struct cipher_testvec serpent_dec_tv_template[] = {
5034 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5035         {
5036                 .input  = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5037                           "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5038 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec
5039         },
5040  };
5041  
5042 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5043 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5044         {
5045                 .input  = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5046                           "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5047 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec
5048         },
5049  };
5050  
5051 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5052 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5053         { /* Generated with Crypto++ */
5054                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5055                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5056 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc
5057         },
5058  };
5059  
5060 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5061 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5062         { /* Generated with Crypto++ */
5063                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5064                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5065 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc
5066         },
5067  };
5068  
5069 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5070 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5071         { /* Generated with Crypto++ */
5072                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5073                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5074 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr
5075         },
5076  };
5077  
5078 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5079 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5080         { /* Generated with Crypto++ */
5081                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5082                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5083 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr
5084         },
5085  };
5086  
5087 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5088 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5089         /* Generated from AES-LRW test vectors */
5090         {
5091                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5092 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw
5093         },
5094  };
5095  
5096 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5097 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5098         /* Generated from AES-LRW test vectors */
5099         /* same as enc vectors with input and result reversed */
5100         {
5101 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw
5102         },
5103  };
5104  
5105 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5106 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5107         /* Generated from AES-XTS test vectors */
5108         {
5109                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5110 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts
5111         },
5112  };
5113  
5114 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5115 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5116         /* Generated from AES-XTS test vectors */
5117         /* same as enc vectors with input and result reversed */
5118         {
5119 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts
5120  };
5121  
5122  /* Cast6 test vectors from RFC 2612 */
5123 -#define CAST6_ENC_TEST_VECTORS         4
5124 -#define CAST6_DEC_TEST_VECTORS         4
5125 -#define CAST6_CBC_ENC_TEST_VECTORS     1
5126 -#define CAST6_CBC_DEC_TEST_VECTORS     1
5127 -#define CAST6_CTR_ENC_TEST_VECTORS     2
5128 -#define CAST6_CTR_DEC_TEST_VECTORS     2
5129 -#define CAST6_LRW_ENC_TEST_VECTORS     1
5130 -#define CAST6_LRW_DEC_TEST_VECTORS     1
5131 -#define CAST6_XTS_ENC_TEST_VECTORS     1
5132 -#define CAST6_XTS_DEC_TEST_VECTORS     1
5133 -
5134 -static struct cipher_testvec cast6_enc_tv_template[] = {
5135 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5136         {
5137                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5138                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5139 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t
5140         },
5141  };
5142  
5143 -static struct cipher_testvec cast6_dec_tv_template[] = {
5144 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5145         {
5146                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5147                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5148 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t
5149         },
5150  };
5151  
5152 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5153 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5154         { /* Generated from TF test vectors */
5155                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5156                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5157 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e
5158         },
5159  };
5160  
5161 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5162 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5163         { /* Generated from TF test vectors */
5164                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5165                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5166 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d
5167         },
5168  };
5169  
5170 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5171 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5172         { /* Generated from TF test vectors */
5173                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5174                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5175 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e
5176         },
5177  };
5178  
5179 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5180 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5181         { /* Generated from TF test vectors */
5182                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5183                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5184 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d
5185         },
5186  };
5187  
5188 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5189 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5190         { /* Generated from TF test vectors */
5191                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5192                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5193 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e
5194         },
5195  };
5196  
5197 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5198 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5199         { /* Generated from TF test vectors */
5200                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5201                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5202 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d
5203         },
5204  };
5205  
5206 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5207 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5208         { /* Generated from TF test vectors */
5209                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5210                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5211 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e
5212         },
5213  };
5214  
5215 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5216 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5217         { /* Generated from TF test vectors */
5218                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5219                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5220 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d
5221  /*
5222   * AES test vectors.
5223   */
5224 -#define AES_ENC_TEST_VECTORS 4
5225 -#define AES_DEC_TEST_VECTORS 4
5226 -#define AES_CBC_ENC_TEST_VECTORS 5
5227 -#define AES_CBC_DEC_TEST_VECTORS 5
5228 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5229 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5230 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5231 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5232 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5233 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5234 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5235 -#define AES_LRW_ENC_TEST_VECTORS 8
5236 -#define AES_LRW_DEC_TEST_VECTORS 8
5237 -#define AES_XTS_ENC_TEST_VECTORS 5
5238 -#define AES_XTS_DEC_TEST_VECTORS 5
5239 -#define AES_CTR_ENC_TEST_VECTORS 5
5240 -#define AES_CTR_DEC_TEST_VECTORS 5
5241 -#define AES_OFB_ENC_TEST_VECTORS 1
5242 -#define AES_OFB_DEC_TEST_VECTORS 1
5243 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5244 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5245 -#define AES_GCM_ENC_TEST_VECTORS 9
5246 -#define AES_GCM_DEC_TEST_VECTORS 8
5247 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5248 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5249 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5250 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5251 -#define AES_CCM_ENC_TEST_VECTORS 8
5252 -#define AES_CCM_DEC_TEST_VECTORS 7
5253 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5254 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5255 -
5256 -static struct cipher_testvec aes_enc_tv_template[] = {
5257 +static const struct cipher_testvec aes_enc_tv_template[] = {
5258         { /* From FIPS-197 */
5259                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5260                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5261 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_
5262         },
5263  };
5264  
5265 -static struct cipher_testvec aes_dec_tv_template[] = {
5266 +static const struct cipher_testvec aes_dec_tv_template[] = {
5267         { /* From FIPS-197 */
5268                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5269                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5270 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_
5271         },
5272  };
5273  
5274 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5275 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5276         { /* From RFC 3602 */
5277                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5278                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5279 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc
5280         },
5281  };
5282  
5283 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5284 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5285         { /* From RFC 3602 */
5286                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5287                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5288 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec
5289         },
5290  };
5291  
5292 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5293 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5294         { /* Input data from RFC 2410 Case 1 */
5295  #ifdef __LITTLE_ENDIAN
5296                 .key    = "\x08\x00"            /* rta length */
5297 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_
5298         },
5299  };
5300  
5301 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5302 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5303         {
5304  #ifdef __LITTLE_ENDIAN
5305                 .key    = "\x08\x00"            /* rta length */
5306 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5307         },
5308  };
5309  
5310 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5311 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5312         { /* RFC 3602 Case 1 */
5313  #ifdef __LITTLE_ENDIAN
5314                 .key    = "\x08\x00"            /* rta length */
5315 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes
5316         },
5317  };
5318  
5319 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5320 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5321         { /* Input data from RFC 2410 Case 1 */
5322  #ifdef __LITTLE_ENDIAN
5323                 .key    = "\x08\x00"            /* rta length */
5324 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb
5325         },
5326  };
5327  
5328 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5329 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5330         {
5331  #ifdef __LITTLE_ENDIAN
5332                 .key    = "\x08\x00"            /* rta length */
5333 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb
5334         },
5335  };
5336  
5337 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5338 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5339         { /* RFC 3602 Case 1 */
5340  #ifdef __LITTLE_ENDIAN
5341                 .key    = "\x08\x00"            /* rta length */
5342 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a
5343         },
5344  };
5345  
5346 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5347 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5348         { /* RFC 3602 Case 1 */
5349  #ifdef __LITTLE_ENDIAN
5350                 .key    = "\x08\x00"            /* rta length */
5351 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a
5352         },
5353  };
5354  
5355 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5356 -
5357 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5358 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5359         { /*Generated with cryptopp*/
5360  #ifdef __LITTLE_ENDIAN
5361                 .key    = "\x08\x00"            /* rta length */
5362 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des
5363         },
5364  };
5365  
5366 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC       1
5367 -
5368 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5369 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5370         { /*Generated with cryptopp*/
5371  #ifdef __LITTLE_ENDIAN
5372                 .key    = "\x08\x00"            /* rta length */
5373 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d
5374         },
5375  };
5376  
5377 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC       1
5378 -
5379 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5380 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5381         { /*Generated with cryptopp*/
5382  #ifdef __LITTLE_ENDIAN
5383                 .key    = "\x08\x00"            /* rta length */
5384 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d
5385         },
5386  };
5387  
5388 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC       1
5389 -
5390 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5391 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5392         { /*Generated with cryptopp*/
5393  #ifdef __LITTLE_ENDIAN
5394                 .key    = "\x08\x00"            /* rta length */
5395 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d
5396         },
5397  };
5398  
5399 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC       1
5400 -
5401 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5402 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5403         { /*Generated with cryptopp*/
5404  #ifdef __LITTLE_ENDIAN
5405                 .key    = "\x08\x00"            /* rta length */
5406 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d
5407         },
5408  };
5409  
5410 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC    1
5411 -
5412 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5413 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5414         { /*Generated with cryptopp*/
5415  #ifdef __LITTLE_ENDIAN
5416                 .key    = "\x08\x00"            /* rta length */
5417 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des
5418         },
5419  };
5420  
5421 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC  1
5422 -
5423 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5424 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5425         { /*Generated with cryptopp*/
5426  #ifdef __LITTLE_ENDIAN
5427                 .key    = "\x08\x00"            /* rta length */
5428 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d
5429         },
5430  };
5431  
5432 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC  1
5433 -
5434 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5435 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5436         { /*Generated with cryptopp*/
5437  #ifdef __LITTLE_ENDIAN
5438                 .key    = "\x08\x00"            /* rta length */
5439 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d
5440         },
5441  };
5442  
5443 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC  1
5444 -
5445 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5446 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5447         { /*Generated with cryptopp*/
5448  #ifdef __LITTLE_ENDIAN
5449                 .key    = "\x08\x00"            /* rta length */
5450 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d
5451         },
5452  };
5453  
5454 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC  1
5455 -
5456 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5457 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5458         { /*Generated with cryptopp*/
5459  #ifdef __LITTLE_ENDIAN
5460                 .key    = "\x08\x00"            /* rta length */
5461 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d
5462         },
5463  };
5464  
5465 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5466 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5467         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5468         { /* LRW-32-AES 1 */
5469                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5470 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc
5471         }
5472  };
5473  
5474 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5475 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5476         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5477         /* same as enc vectors with input and result reversed */
5478         { /* LRW-32-AES 1 */
5479 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec
5480         }
5481  };
5482  
5483 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5484 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5485         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5486         { /* XTS-AES 1 */
5487                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5488 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc
5489         }
5490  };
5491  
5492 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5493 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5494         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5495         { /* XTS-AES 1 */
5496                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5497 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec
5498  };
5499  
5500  
5501 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5502 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5503         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5504                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5505                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5506 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc
5507         },
5508  };
5509  
5510 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5511 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5512         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5513                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5514                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5515 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec
5516         },
5517  };
5518  
5519 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5520 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5521         { /* From RFC 3686 */
5522                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5523                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5524 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc
5525         },
5526  };
5527  
5528 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5529 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5530         { /* From RFC 3686 */
5531                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5532                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5533 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc
5534         },
5535  };
5536  
5537 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5538 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5539          /* From NIST Special Publication 800-38A, Appendix F.5 */
5540         {
5541                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5542 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc
5543         }
5544  };
5545  
5546 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5547 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5548          /* From NIST Special Publication 800-38A, Appendix F.5 */
5549         {
5550                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5551 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec
5552         }
5553  };
5554  
5555 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5556 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5557         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5558                 .key    = zeroed_string,
5559                 .klen   = 16,
5560 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t
5561         }
5562  };
5563  
5564 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5565 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5566         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5567                 .key    = zeroed_string,
5568                 .klen   = 32,
5569 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t
5570         }
5571  };
5572  
5573 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5574 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5575         { /* Generated using Crypto++ */
5576                 .key    = zeroed_string,
5577                 .klen   = 20,
5578 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41
5579         }
5580  };
5581  
5582 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5583 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5584         { /* Generated using Crypto++ */
5585                 .key    = zeroed_string,
5586                 .klen   = 20,
5587 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41
5588         }
5589  };
5590  
5591 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5592 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5593         { /* From draft-mcgrew-gcm-test-01 */
5594                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5595                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5596 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45
5597         }
5598  };
5599  
5600 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5601 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5602         { /* From draft-mcgrew-gcm-test-01 */
5603                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5604                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5605 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45
5606         },
5607  };
5608  
5609 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5610 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5611         { /* From RFC 3610 */
5612                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5613                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5614 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t
5615         }
5616  };
5617  
5618 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5619 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5620         { /* From RFC 3610 */
5621                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5622                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5623 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t
5624   * These vectors are copied/generated from the ones for rfc4106 with
5625   * the key truncated by one byte.
5626   */
5627 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5628 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5629         { /* Generated using Crypto++ */
5630                 .key    = zeroed_string,
5631                 .klen   = 19,
5632 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43
5633         }
5634  };
5635  
5636 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]   = {
5637 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]     = {
5638         { /* Generated using Crypto++ */
5639                 .key    = zeroed_string,
5640                 .klen   = 19,
5641 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43
5642  /*
5643   * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5644   */
5645 -#define RFC7539_ENC_TEST_VECTORS 2
5646 -#define RFC7539_DEC_TEST_VECTORS 2
5647 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5648 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5649         {
5650                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5651                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5652 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t
5653         },
5654  };
5655  
5656 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5657 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5658         {
5659                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5660                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5661 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t
5662  /*
5663   * draft-irtf-cfrg-chacha20-poly1305
5664   */
5665 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5666 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5667 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5668 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5669         {
5670                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5671                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5672 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en
5673         },
5674  };
5675  
5676 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5677 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5678         {
5679                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5680                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5681 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de
5682   * semiblock of the ciphertext from the test vector. For decryption, iv is
5683   * the first semiblock of the ciphertext.
5684   */
5685 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5686 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5687         {
5688                 .key    = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5689                           "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5690 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_
5691         },
5692  };
5693  
5694 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5695 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5696         {
5697                 .key    = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5698                           "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5699 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_
5700   *     http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5701   * Only AES-128 is supported at this time.
5702   */
5703 -#define ANSI_CPRNG_AES_TEST_VECTORS    6
5704 -
5705 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5706 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5707         {
5708                 .key    = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5709                           "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5710 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a
5711   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5712   * w/o personalization string, w/ and w/o additional input string).
5713   */
5714 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5715 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5716         {
5717                 .entropy = (unsigned char *)
5718                         "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5719 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25
5720         },
5721  };
5722  
5723 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5724 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5725         {
5726                 .entropy = (unsigned char *)
5727                         "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5728 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_
5729         },
5730  };
5731  
5732 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5733 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5734         {
5735                 .entropy = (unsigned char *)
5736                         "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5737 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5738   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5739   * w/o personalization string, w/ and w/o additional input string).
5740   */
5741 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5742 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5743         {
5744                 .entropy = (unsigned char *)
5745                         "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5746 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha
5747         },
5748  };
5749  
5750 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5751 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5752         {
5753                 .entropy = (unsigned char *)
5754                         "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5755 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma
5756         },
5757  };
5758  
5759 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5760 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5761         {
5762                 .entropy = (unsigned char *)
5763                         "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5764 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr
5765         },
5766  };
5767  
5768 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5769 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5770         {
5771                 .entropy = (unsigned char *)
5772                         "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5773 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr
5774         },
5775  };
5776  
5777 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5778 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5779         {
5780                 .entropy = (unsigned char *)
5781                         "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5782 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr
5783  };
5784  
5785  /* Cast5 test vectors from RFC 2144 */
5786 -#define CAST5_ENC_TEST_VECTORS         4
5787 -#define CAST5_DEC_TEST_VECTORS         4
5788 -#define CAST5_CBC_ENC_TEST_VECTORS     1
5789 -#define CAST5_CBC_DEC_TEST_VECTORS     1
5790 -#define CAST5_CTR_ENC_TEST_VECTORS     2
5791 -#define CAST5_CTR_DEC_TEST_VECTORS     2
5792 -
5793 -static struct cipher_testvec cast5_enc_tv_template[] = {
5794 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5795         {
5796                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5797                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5798 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t
5799         },
5800  };
5801  
5802 -static struct cipher_testvec cast5_dec_tv_template[] = {
5803 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5804         {
5805                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5806                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5807 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t
5808         },
5809  };
5810  
5811 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5812 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5813         { /* Generated from TF test vectors */
5814                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5815                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5816 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e
5817         },
5818  };
5819  
5820 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5821 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5822         { /* Generated from TF test vectors */
5823                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5824                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5825 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d
5826         },
5827  };
5828  
5829 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5830 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5831         { /* Generated from TF test vectors */
5832                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5833                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5834 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e
5835         },
5836  };
5837  
5838 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5839 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5840         { /* Generated from TF test vectors */
5841                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5842                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5843 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d
5844  /*
5845   * ARC4 test vectors from OpenSSL
5846   */
5847 -#define ARC4_ENC_TEST_VECTORS  7
5848 -#define ARC4_DEC_TEST_VECTORS  7
5849 -
5850 -static struct cipher_testvec arc4_enc_tv_template[] = {
5851 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5852         {
5853                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5854                 .klen   = 8,
5855 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv
5856         },
5857  };
5858  
5859 -static struct cipher_testvec arc4_dec_tv_template[] = {
5860 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5861         {
5862                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5863                 .klen   = 8,
5864 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv
5865  /*
5866   * TEA test vectors
5867   */
5868 -#define TEA_ENC_TEST_VECTORS   4
5869 -#define TEA_DEC_TEST_VECTORS   4
5870 -
5871 -static struct cipher_testvec tea_enc_tv_template[] = {
5872 +static const struct cipher_testvec tea_enc_tv_template[] = {
5873         {
5874                 .key    = zeroed_string,
5875                 .klen   = 16,
5876 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_
5877         }
5878  };
5879  
5880 -static struct cipher_testvec tea_dec_tv_template[] = {
5881 +static const struct cipher_testvec tea_dec_tv_template[] = {
5882         {
5883                 .key    = zeroed_string,
5884                 .klen   = 16,
5885 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_
5886  /*
5887   * XTEA test vectors
5888   */
5889 -#define XTEA_ENC_TEST_VECTORS  4
5890 -#define XTEA_DEC_TEST_VECTORS  4
5891 -
5892 -static struct cipher_testvec xtea_enc_tv_template[] = {
5893 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5894         {
5895                 .key    = zeroed_string,
5896                 .klen   = 16,
5897 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv
5898         }
5899  };
5900  
5901 -static struct cipher_testvec xtea_dec_tv_template[] = {
5902 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5903         {
5904                 .key    = zeroed_string,
5905                 .klen   = 16,
5906 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv
5907  /*
5908   * KHAZAD test vectors.
5909   */
5910 -#define KHAZAD_ENC_TEST_VECTORS 5
5911 -#define KHAZAD_DEC_TEST_VECTORS 5
5912 -
5913 -static struct cipher_testvec khazad_enc_tv_template[] = {
5914 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5915         {
5916                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5917                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5918 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_
5919         },
5920  };
5921  
5922 -static struct cipher_testvec khazad_dec_tv_template[] = {
5923 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5924         {
5925                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5926                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5927 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_
5928   * Anubis test vectors.
5929   */
5930  
5931 -#define ANUBIS_ENC_TEST_VECTORS                        5
5932 -#define ANUBIS_DEC_TEST_VECTORS                        5
5933 -#define ANUBIS_CBC_ENC_TEST_VECTORS            2
5934 -#define ANUBIS_CBC_DEC_TEST_VECTORS            2
5935 -
5936 -static struct cipher_testvec anubis_enc_tv_template[] = {
5937 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5938         {
5939                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5940                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5941 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_
5942         },
5943  };
5944  
5945 -static struct cipher_testvec anubis_dec_tv_template[] = {
5946 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5947         {
5948                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5949                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5950 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_
5951         },
5952  };
5953  
5954 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5955 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5956         {
5957                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5958                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5959 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_
5960         },
5961  };
5962  
5963 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5964 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5965         {
5966                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5967                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5968 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_
5969  /*
5970   * XETA test vectors
5971   */
5972 -#define XETA_ENC_TEST_VECTORS  4
5973 -#define XETA_DEC_TEST_VECTORS  4
5974 -
5975 -static struct cipher_testvec xeta_enc_tv_template[] = {
5976 +static const struct cipher_testvec xeta_enc_tv_template[] = {
5977         {
5978                 .key    = zeroed_string,
5979                 .klen   = 16,
5980 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv
5981         }
5982  };
5983  
5984 -static struct cipher_testvec xeta_dec_tv_template[] = {
5985 +static const struct cipher_testvec xeta_dec_tv_template[] = {
5986         {
5987                 .key    = zeroed_string,
5988                 .klen   = 16,
5989 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv
5990  /*
5991   * FCrypt test vectors
5992   */
5993 -#define FCRYPT_ENC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
5994 -#define FCRYPT_DEC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
5995 -
5996 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5997 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5998         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
5999                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6000                 .klen   = 8,
6001 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc
6002         }
6003  };
6004  
6005 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6006 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6007         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6008                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6009                 .klen   = 8,
6010 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc
6011  /*
6012   * CAMELLIA test vectors.
6013   */
6014 -#define CAMELLIA_ENC_TEST_VECTORS 4
6015 -#define CAMELLIA_DEC_TEST_VECTORS 4
6016 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6017 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6018 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6019 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6020 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6021 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6022 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6023 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6024 -
6025 -static struct cipher_testvec camellia_enc_tv_template[] = {
6026 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6027         {
6028                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6029                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6030 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en
6031         },
6032  };
6033  
6034 -static struct cipher_testvec camellia_dec_tv_template[] = {
6035 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6036         {
6037                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6038                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6039 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de
6040         },
6041  };
6042  
6043 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6044 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6045         {
6046                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6047                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6048 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb
6049         },
6050  };
6051  
6052 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6053 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6054         {
6055                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6056                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6057 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb
6058         },
6059  };
6060  
6061 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6062 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6063         { /* Generated with Crypto++ */
6064                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6065                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6066 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct
6067         },
6068  };
6069  
6070 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6071 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6072         { /* Generated with Crypto++ */
6073                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6074                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6075 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct
6076         },
6077  };
6078  
6079 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6080 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6081         /* Generated from AES-LRW test vectors */
6082         {
6083                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6084 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr
6085         },
6086  };
6087  
6088 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6089 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6090         /* Generated from AES-LRW test vectors */
6091         /* same as enc vectors with input and result reversed */
6092         {
6093 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr
6094         },
6095  };
6096  
6097 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6098 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6099         /* Generated from AES-XTS test vectors */
6100         {
6101                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6102 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt
6103         },
6104  };
6105  
6106 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6107 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6108         /* Generated from AES-XTS test vectors */
6109         /* same as enc vectors with input and result reversed */
6110         {
6111 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt
6112  /*
6113   * SEED test vectors
6114   */
6115 -#define SEED_ENC_TEST_VECTORS  4
6116 -#define SEED_DEC_TEST_VECTORS  4
6117 -
6118 -static struct cipher_testvec seed_enc_tv_template[] = {
6119 +static const struct cipher_testvec seed_enc_tv_template[] = {
6120         {
6121                 .key    = zeroed_string,
6122                 .klen   = 16,
6123 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv
6124         }
6125  };
6126  
6127 -static struct cipher_testvec seed_dec_tv_template[] = {
6128 +static const struct cipher_testvec seed_dec_tv_template[] = {
6129         {
6130                 .key    = zeroed_string,
6131                 .klen   = 16,
6132 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv
6133         }
6134  };
6135  
6136 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6137 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6138 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6139         /*
6140         * Testvectors from verified.test-vectors submitted to ECRYPT.
6141         * They are truncated to size 39, 64, 111, 129 to test a variety
6142 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str
6143         },
6144  };
6145  
6146 -#define CHACHA20_ENC_TEST_VECTORS 4
6147 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6148 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6149         { /* RFC7539 A.2. Test Vector #1 */
6150                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6151                           "\x00\x00\x00\x00\x00\x00\x00\x00"
6152 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en
6153  /*
6154   * CTS (Cipher Text Stealing) mode tests
6155   */
6156 -#define CTS_MODE_ENC_TEST_VECTORS 6
6157 -#define CTS_MODE_DEC_TEST_VECTORS 6
6158 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6159 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6160         { /* from rfc3962 */
6161                 .klen   = 16,
6162                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6163 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en
6164         }
6165  };
6166  
6167 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6168 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6169         { /* from rfc3962 */
6170                 .klen   = 16,
6171                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6172 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6173   * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6174   */
6175  
6176 -#define DEFLATE_COMP_TEST_VECTORS 2
6177 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6178 -
6179 -static struct comp_testvec deflate_comp_tv_template[] = {
6180 +static const struct comp_testvec deflate_comp_tv_template[] = {
6181         {
6182                 .inlen  = 70,
6183                 .outlen = 38,
6184 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_
6185         },
6186  };
6187  
6188 -static struct comp_testvec deflate_decomp_tv_template[] = {
6189 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6190         {
6191                 .inlen  = 122,
6192                 .outlen = 191,
6193 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom
6194  /*
6195   * LZO test vectors (null-terminated strings).
6196   */
6197 -#define LZO_COMP_TEST_VECTORS 2
6198 -#define LZO_DECOMP_TEST_VECTORS 2
6199 -
6200 -static struct comp_testvec lzo_comp_tv_template[] = {
6201 +static const struct comp_testvec lzo_comp_tv_template[] = {
6202         {
6203                 .inlen  = 70,
6204                 .outlen = 57,
6205 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t
6206         },
6207  };
6208  
6209 -static struct comp_testvec lzo_decomp_tv_template[] = {
6210 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6211         {
6212                 .inlen  = 133,
6213                 .outlen = 159,
6214 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv
6215   */
6216  #define MICHAEL_MIC_TEST_VECTORS 6
6217  
6218 -static struct hash_testvec michael_mic_tv_template[] = {
6219 +static const struct hash_testvec michael_mic_tv_template[] = {
6220         {
6221                 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6222                 .ksize = 8,
6223 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t
6224  /*
6225   * CRC32 test vectors
6226   */
6227 -#define CRC32_TEST_VECTORS 14
6228 -
6229 -static struct hash_testvec crc32_tv_template[] = {
6230 +static const struct hash_testvec crc32_tv_template[] = {
6231         {
6232                 .key = "\x87\xa9\xcb\xed",
6233                 .ksize = 4,
6234 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp
6235  /*
6236   * CRC32C test vectors
6237   */
6238 -#define CRC32C_TEST_VECTORS 15
6239 -
6240 -static struct hash_testvec crc32c_tv_template[] = {
6241 +static const struct hash_testvec crc32c_tv_template[] = {
6242         {
6243                 .psize = 0,
6244                 .digest = "\x00\x00\x00\x00",
6245 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem
6246  /*
6247  * Blackfin CRC test vectors
6248   */
6249 -#define BFIN_CRC_TEST_VECTORS 6
6250 -
6251 -static struct hash_testvec bfin_crc_tv_template[] = {
6252 +static const struct hash_testvec bfin_crc_tv_template[] = {
6253         {
6254                 .psize = 0,
6255                 .digest = "\x00\x00\x00\x00",
6256 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t
6257  
6258  };
6259  
6260 -#define LZ4_COMP_TEST_VECTORS 1
6261 -#define LZ4_DECOMP_TEST_VECTORS 1
6262 -
6263  static struct comp_testvec lz4_comp_tv_template[] = {
6264         {
6265                 .inlen  = 70,
6266 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv
6267         },
6268  };
6269  
6270 -#define LZ4HC_COMP_TEST_VECTORS 1
6271 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6272 -
6273  static struct comp_testvec lz4hc_comp_tv_template[] = {
6274         {
6275                 .inlen  = 70,
6276 --- /dev/null
6277 +++ b/crypto/tls.c
6278 @@ -0,0 +1,607 @@
6279 +/*
6280 + * Copyright 2013 Freescale Semiconductor, Inc.
6281 + * Copyright 2017 NXP Semiconductor, Inc.
6282 + *
6283 + * This program is free software; you can redistribute it and/or modify it
6284 + * under the terms of the GNU General Public License as published by the Free
6285 + * Software Foundation; either version 2 of the License, or (at your option)
6286 + * any later version.
6287 + *
6288 + */
6289 +
6290 +#include <crypto/internal/aead.h>
6291 +#include <crypto/internal/hash.h>
6292 +#include <crypto/internal/skcipher.h>
6293 +#include <crypto/authenc.h>
6294 +#include <crypto/null.h>
6295 +#include <crypto/scatterwalk.h>
6296 +#include <linux/err.h>
6297 +#include <linux/init.h>
6298 +#include <linux/module.h>
6299 +#include <linux/rtnetlink.h>
6300 +
6301 +struct tls_instance_ctx {
6302 +       struct crypto_ahash_spawn auth;
6303 +       struct crypto_skcipher_spawn enc;
6304 +};
6305 +
6306 +struct crypto_tls_ctx {
6307 +       unsigned int reqoff;
6308 +       struct crypto_ahash *auth;
6309 +       struct crypto_skcipher *enc;
6310 +       struct crypto_skcipher *null;
6311 +};
6312 +
6313 +struct tls_request_ctx {
6314 +       /*
6315 +        * cryptlen holds the payload length in the case of encryption, or
6316 +        * payload_len + icv_len + padding_len in the case of decryption
6317 +        */
6318 +       unsigned int cryptlen;
6319 +       /* working space for partial results */
6320 +       struct scatterlist tmp[2];
6321 +       struct scatterlist cipher[2];
6322 +       struct scatterlist dst[2];
6323 +       char tail[];
6324 +};
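+
+/*
+ * A sketch of the tail[] layout, derived from the comment in
+ * crypto_tls_encrypt() below: the start of tail[] holds the hash result,
+ * aligned to the ahash transform's alignmask, and the ahash/skcipher
+ * sub-requests are built at tail + reqoff (used one at a time) so that
+ * neither overwrites the digest.
+ */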
6325 +
6326 +struct async_op {
6327 +       struct completion completion;
6328 +       int err;
6329 +};
6330 +
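+/*
+ * Completion callback shared by the async sub-requests. The issuing
+ * context initialises async_op.completion, and when a request returns
+ * -EINPROGRESS it sleeps in wait_for_completion_interruptible() until
+ * this callback records the final error and signals completion (see
+ * crypto_tls_genicv() below for the pattern).
+ */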
6331 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6332 +{
6333 +       struct async_op *areq = req->data;
6334 +
6335 +       if (err == -EINPROGRESS)
6336 +               return;
6337 +
6338 +       areq->err = err;
6339 +       complete(&areq->completion);
6340 +}
6341 +
6342 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6343 +                            unsigned int keylen)
6344 +{
6345 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6346 +       struct crypto_ahash *auth = ctx->auth;
6347 +       struct crypto_skcipher *enc = ctx->enc;
6348 +       struct crypto_authenc_keys keys;
6349 +       int err = -EINVAL;
6350 +
6351 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6352 +               goto badkey;
6353 +
6354 +       crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6355 +       crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6356 +                                   CRYPTO_TFM_REQ_MASK);
6357 +       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6358 +       crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6359 +                                      CRYPTO_TFM_RES_MASK);
6360 +
6361 +       if (err)
6362 +               goto out;
6363 +
6364 +       crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6365 +       crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6366 +                                        CRYPTO_TFM_REQ_MASK);
6367 +       err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6368 +       crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6369 +                                      CRYPTO_TFM_RES_MASK);
6370 +
6371 +out:
6372 +       return err;
6373 +
6374 +badkey:
6375 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6376 +       goto out;
6377 +}
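+
+/*
+ * Note: crypto_authenc_extractkeys() above parses the usual authenc key
+ * blob, the same layout the testmgr vectors earlier in this patch spell
+ * out byte by byte: a host-endian rtattr header (length, type), a
+ * big-endian 4-byte encryption key length, then the authentication key
+ * followed by the encryption key.
+ */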
6378 +
6379 +/**
6380 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6381 + * @hash:      (output) buffer to save the digest into
6382 + * @src:       (input) scatterlist with the assoc and payload data
6383 + * @srclen:    (input) size of the source buffer (assoclen + cryptlen)
6384 + * @req:       (input) aead request
6385 + **/
6386 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6387 +                            unsigned int srclen, struct aead_request *req)
6388 +{
6389 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6390 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6391 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6392 +       struct async_op ahash_op;
6393 +       struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6394 +       unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6395 +       int err = -EBADMSG;
6396 +
6397 +       /* Bail out if the request assoc len is 0 */
6398 +       if (!req->assoclen)
6399 +               return err;
6400 +
6401 +       init_completion(&ahash_op.completion);
6402 +
6403 +       /* the hash transform to be executed comes from the original request */
6404 +       ahash_request_set_tfm(ahreq, ctx->auth);
6405 +       /* prepare the hash request with input data and result pointer */
6406 +       ahash_request_set_crypt(ahreq, src, hash, srclen);
6407 +       /* set the notifier for when the async hash function returns */
6408 +       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6409 +                                  tls_async_op_done, &ahash_op);
6410 +
6411 +       /* Calculate the digest on the given data. The result is put in hash */
6412 +       err = crypto_ahash_digest(ahreq);
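+       /* for an async transform, wait here for tls_async_op_done to fire */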
6413 +       if (err == -EINPROGRESS) {
6414 +               err = wait_for_completion_interruptible(&ahash_op.completion);
6415 +               if (!err)
6416 +                       err = ahash_op.err;
6417 +       }
6418 +
6419 +       return err;
6420 +}
6421 +
6422 +/**
6423 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6424 + * @hash:      (output) buffer to save the digest and padding into
6425 + * @phashlen:  (output) the size of digest + padding
6426 + * @req:       (input) aead request
6427 + **/
6428 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6429 +                                struct aead_request *req)
6430 +{
6431 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6432 +       unsigned int hash_size = crypto_aead_authsize(tls);
6433 +       unsigned int block_size = crypto_aead_blocksize(tls);
6434 +       unsigned int srclen = req->cryptlen + hash_size;
6435 +       unsigned int icvlen = req->cryptlen + req->assoclen;
6436 +       unsigned int padlen;
6437 +       int err;
6438 +
6439 +       err = crypto_tls_genicv(hash, req->src, icvlen, req);
6440 +       if (err)
6441 +               goto out;
6442 +
6443 +       /* add TLS-style padding after the digest: padlen bytes, each of value padlen - 1 */
6444 +       padlen = block_size - (srclen % block_size);
6445 +       memset(hash + hash_size, padlen - 1, padlen);
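+       /*
+        * For example, with a 16-byte cipher block and srclen = 70:
+        * padlen = 16 - (70 % 16) = 10, so ten bytes of value 0x09 are
+        * appended after the digest.
+        */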
6446 +
6447 +       *phashlen = hash_size + padlen;
6448 +out:
6449 +       return err;
6450 +}
6451 +
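+/*
+ * Copy data between scatterlists by "encrypting" it with the ecb(cipher_null)
+ * transform held in ctx->null. The null cipher is effectively a memcpy, so
+ * this reuses the skcipher machinery to duplicate the associated data.
+ */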
6452 +static int crypto_tls_copy_data(struct aead_request *req,
6453 +                               struct scatterlist *src,
6454 +                               struct scatterlist *dst,
6455 +                               unsigned int len)
6456 +{
6457 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6458 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6459 +       SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6460 +
6461 +       skcipher_request_set_tfm(skreq, ctx->null);
6462 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6463 +                                     NULL, NULL);
6464 +       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6465 +
6466 +       return crypto_skcipher_encrypt(skreq);
6467 +}
6468 +
6469 +static int crypto_tls_encrypt(struct aead_request *req)
6470 +{
6471 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6472 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6473 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6474 +       struct skcipher_request *skreq;
6475 +       struct scatterlist *cipher = treq_ctx->cipher;
6476 +       struct scatterlist *tmp = treq_ctx->tmp;
6477 +       struct scatterlist *sg, *src, *dst;
6478 +       unsigned int cryptlen, phashlen;
6479 +       u8 *hash = treq_ctx->tail;
6480 +       int err;
6481 +
6482 +       /*
6483 +        * The hash result is saved at the beginning of the tls request ctx
6484 +        * and is aligned as required by the hash transform. Enough space was
6485 +        * allocated in crypto_tls_init_tfm to accommodate the difference. The
6486 +        * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6487 +        * the result is not overwritten by the second (cipher) request.
6488 +        */
6489 +       hash = (u8 *)ALIGN((unsigned long)hash +
6490 +                          crypto_ahash_alignmask(ctx->auth),
6491 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6492 +
6493 +       /*
6494 +        * STEP 1: create ICV together with necessary padding
6495 +        */
6496 +       err = crypto_tls_gen_padicv(hash, &phashlen, req);
6497 +       if (err)
6498 +               return err;
6499 +
6500 +       /*
6501 +        * STEP 2: Hash and padding are combined with the payload
6502 +        * depending on the form in which it arrives. Scatter tables must have at least
6503 +        * one page of data before chaining with another table and can't have
6504 +        * an empty data page. The following code addresses these requirements.
6505 +        *
6506 +        * If the payload is empty, only the hash is encrypted, otherwise the
6507 +        * payload scatterlist is merged with the hash. A special merging case
6508 +        * is when the payload has only one page of data. In that case the
6509 +        * payload page is moved to another scatterlist and prepared there for
6510 +        * encryption.
6511 +        */
6512 +       if (req->cryptlen) {
6513 +               src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6514 +
6515 +               sg_init_table(cipher, 2);
6516 +               sg_set_buf(cipher + 1, hash, phashlen);
6517 +
6518 +               if (sg_is_last(src)) {
6519 +                       sg_set_page(cipher, sg_page(src), req->cryptlen,
6520 +                                   src->offset);
6521 +                       src = cipher;
6522 +               } else {
6523 +                       unsigned int rem_len = req->cryptlen;
6524 +
6525 +                       for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6526 +                               rem_len -= min(rem_len, sg->length);
6527 +
6528 +                       sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6529 +                       sg_chain(sg, 1, cipher);
6530 +               }
6531 +       } else {
6532 +               sg_init_one(cipher, hash, phashlen);
6533 +               src = cipher;
6534 +       }
6535 +
6536 +       /*
6537 +        * If src != dst, copy the associated data from source to destination.
6538 +        * In both cases, fast-forward past the associated data in the dest.
6539 +        */
6540 +       if (req->src != req->dst) {
6541 +               err = crypto_tls_copy_data(req, req->src, req->dst,
6542 +                                          req->assoclen);
6543 +               if (err)
6544 +                       return err;
6545 +       }
6546 +       dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6547 +
6548 +       /*
6549 +        * STEP 3: encrypt the frame and return the result
6550 +        */
6551 +       cryptlen = req->cryptlen + phashlen;
6552 +
6553 +       /*
6554 +        * The hash and the cipher are applied at different times and their
6555 +        * requests can use the same memory space without interference
6556 +        */
6557 +       skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6558 +       skcipher_request_set_tfm(skreq, ctx->enc);
6559 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6560 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6561 +                                     req->base.complete, req->base.data);
6562 +       /*
6563 +        * Apply the cipher transform. The result will be in req->dst when the
6564 +        * asynchronous call terminates.
6565 +        */
6566 +       err = crypto_skcipher_encrypt(skreq);
6567 +
6568 +       return err;
6569 +}
6570 +
6571 +static int crypto_tls_decrypt(struct aead_request *req)
6572 +{
6573 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6574 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6575 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6576 +       unsigned int cryptlen = req->cryptlen;
6577 +       unsigned int hash_size = crypto_aead_authsize(tls);
6578 +       unsigned int block_size = crypto_aead_blocksize(tls);
6579 +       struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6580 +       struct scatterlist *tmp = treq_ctx->tmp;
6581 +       struct scatterlist *src, *dst;
6582 +
6583 +       u8 padding[255]; /* padding can be 0-255 bytes */
6584 +       u8 pad_size;
6585 +       u16 *len_field;
6586 +       u8 *ihash, *hash = treq_ctx->tail;
6587 +
6588 +       int paderr = 0;
6589 +       int err = -EINVAL;
6590 +       int i;
6591 +       struct async_op ciph_op;
6592 +
6593 +       /*
6594 +        * Rule out bad packets. The input packet length must be larger than
6595 +        * hash_size and a whole multiple of the cipher block size.
6596 +        */
6597 +       if (cryptlen <= hash_size || cryptlen % block_size)
6598 +               goto out;
6599 +
6600 +       /*
6601 +        * Step 1 - Decrypt the source. Fast-forward past the associated data
6602 +        * to the encrypted data. The result will be overwritten in place so
6603 +        * that the decrypted data will be adjacent to the associated data. The
6604 +        * last step (computing the hash) will have its input data already
6605 +        * prepared and ready to be accessed at req->src.
6606 +        */
6607 +       src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6608 +       dst = src;
6609 +
6610 +       init_completion(&ciph_op.completion);
6611 +       skcipher_request_set_tfm(skreq, ctx->enc);
6612 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6613 +                                     tls_async_op_done, &ciph_op);
6614 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6615 +       err = crypto_skcipher_decrypt(skreq);
6616 +       if (err == -EINPROGRESS) {
6617 +               err = wait_for_completion_interruptible(&ciph_op.completion);
6618 +               if (!err)
6619 +                       err = ciph_op.err;
6620 +       }
6621 +       if (err)
6622 +               goto out;
6623 +
6624 +       /*
6625 +        * Step 2 - Verify padding
6626 +        * Retrieve the last byte of the payload; this is the padding size.
6627 +        */
6628 +       cryptlen -= 1;
6629 +       scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6630 +
6631 +       /* RFC recommendation: treat an invalid padding size as a zero-length pad and flag the record */
6632 +       if (cryptlen < pad_size + hash_size) {
6633 +               pad_size = 0;
6634 +               paderr = -EBADMSG;
6635 +       }
6636 +       cryptlen -= pad_size;
6637 +       scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6638 +
6639 +       /* Every padding byte must equal pad_size; verify all of them */
6640 +       for (i = 0; i < pad_size; i++)
6641 +               if (padding[i] != pad_size)
6642 +                       paderr = -EBADMSG;
6643 +
6644 +       /*
6645 +        * Step 3 - Verify hash
6646 +        * Align the digest result as required by the hash transform. Enough
6647 +        * space was allocated in crypto_tls_init_tfm
6648 +        */
6649 +       hash = (u8 *)ALIGN((unsigned long)hash +
6650 +                          crypto_ahash_alignmask(ctx->auth),
6651 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6652 +       /*
6653 +        * Two bytes at the end of the associated data make the length field.
6654 +        * It must be updated with the length of the cleartext message before
6655 +        * the hash is calculated.
6656 +        */
6657 +       len_field = sg_virt(req->src) + req->assoclen - 2;
6658 +       cryptlen -= hash_size;
6659 +       *len_field = htons(cryptlen);
6660 +
6661 +       /* This is the hash from the decrypted packet. Save it for later */
6662 +       ihash = hash + hash_size;
6663 +       scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6664 +
6665 +       /* Now compute and compare our ICV with the one from the packet */
6666 +       err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6667 +       if (!err)
6668 +               err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6669 +
6670 +       if (req->src != req->dst) {
6671 +               err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6672 +                                          req->assoclen);
6673 +               if (err)
6674 +                       goto out;
6675 +       }
6676 +
6677 +       /* return the first found error */
6678 +       if (paderr)
6679 +               err = paderr;
6680 +
6681 +out:
6682 +       aead_request_complete(req, err);
6683 +       return err;
6684 +}
6685 +
6686 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6687 +{
6688 +       struct aead_instance *inst = aead_alg_instance(tfm);
6689 +       struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6690 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6691 +       struct crypto_ahash *auth;
6692 +       struct crypto_skcipher *enc;
6693 +       struct crypto_skcipher *null;
6694 +       int err;
6695 +
6696 +       auth = crypto_spawn_ahash(&ictx->auth);
6697 +       if (IS_ERR(auth))
6698 +               return PTR_ERR(auth);
6699 +
6700 +       enc = crypto_spawn_skcipher(&ictx->enc);
6701 +       err = PTR_ERR(enc);
6702 +       if (IS_ERR(enc))
6703 +               goto err_free_ahash;
6704 +
6705 +       null = crypto_get_default_null_skcipher2();
6706 +       err = PTR_ERR(null);
6707 +       if (IS_ERR(null))
6708 +               goto err_free_skcipher;
6709 +
6710 +       ctx->auth = auth;
6711 +       ctx->enc = enc;
6712 +       ctx->null = null;
6713 +
6714 +       /*
6715 +        * Allow enough space for two digests. The two digests will be compared
6716 +        * during the decryption phase. One will come from the decrypted packet
6717 +        * and the other will be calculated. For encryption, one digest is
6718 +        * padded (up to a cipher blocksize) and chained with the payload
6719 +        */
6720 +       ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6721 +                           crypto_ahash_alignmask(auth),
6722 +                           crypto_ahash_alignmask(auth) + 1) +
6723 +                           max(crypto_ahash_digestsize(auth),
6724 +                               crypto_skcipher_blocksize(enc));
6725 +
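+       /*
+        * Request ctx layout: the tls_request_ctx header, a digest scratch
+        * area of ctx->reqoff bytes, then the larger of the ahash and
+        * skcipher sub-requests.
+        */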
6726 +       crypto_aead_set_reqsize(tfm,
6727 +                               sizeof(struct tls_request_ctx) +
6728 +                               ctx->reqoff +
6729 +                               max_t(unsigned int,
6730 +                                     crypto_ahash_reqsize(auth) +
6731 +                                     sizeof(struct ahash_request),
6732 +                                     crypto_skcipher_reqsize(enc) +
6733 +                                     sizeof(struct skcipher_request)));
6734 +
6735 +       return 0;
6736 +
6737 +err_free_skcipher:
6738 +       crypto_free_skcipher(enc);
6739 +err_free_ahash:
6740 +       crypto_free_ahash(auth);
6741 +       return err;
6742 +}
6743 +
6744 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6745 +{
6746 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6747 +
6748 +       crypto_free_ahash(ctx->auth);
6749 +       crypto_free_skcipher(ctx->enc);
6750 +       crypto_put_default_null_skcipher2();
6751 +}
6752 +
6753 +static void crypto_tls_free(struct aead_instance *inst)
6754 +{
6755 +       struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6756 +
6757 +       crypto_drop_skcipher(&ctx->enc);
6758 +       crypto_drop_ahash(&ctx->auth);
6759 +       kfree(inst);
6760 +}
6761 +
6762 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6763 +{
6764 +       struct crypto_attr_type *algt;
6765 +       struct aead_instance *inst;
6766 +       struct hash_alg_common *auth;
6767 +       struct crypto_alg *auth_base;
6768 +       struct skcipher_alg *enc;
6769 +       struct tls_instance_ctx *ctx;
6770 +       const char *enc_name;
6771 +       int err;
6772 +
6773 +       algt = crypto_get_attr_type(tb);
6774 +       if (IS_ERR(algt))
6775 +               return PTR_ERR(algt);
6776 +
6777 +       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6778 +               return -EINVAL;
6779 +
6780 +       auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6781 +                             CRYPTO_ALG_TYPE_AHASH_MASK |
6782 +                             crypto_requires_sync(algt->type, algt->mask));
6783 +       if (IS_ERR(auth))
6784 +               return PTR_ERR(auth);
6785 +
6786 +       auth_base = &auth->base;
6787 +
6788 +       enc_name = crypto_attr_alg_name(tb[2]);
6789 +       err = PTR_ERR(enc_name);
6790 +       if (IS_ERR(enc_name))
6791 +               goto out_put_auth;
6792 +
6793 +       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6794 +       err = -ENOMEM;
6795 +       if (!inst)
6796 +               goto out_put_auth;
6797 +
6798 +       ctx = aead_instance_ctx(inst);
6799 +
6800 +       err = crypto_init_ahash_spawn(&ctx->auth, auth,
6801 +                                     aead_crypto_instance(inst));
6802 +       if (err)
6803 +               goto err_free_inst;
6804 +
6805 +       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6806 +       err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6807 +                                  crypto_requires_sync(algt->type,
6808 +                                                       algt->mask));
6809 +       if (err)
6810 +               goto err_drop_auth;
6811 +
6812 +       enc = crypto_spawn_skcipher_alg(&ctx->enc);
6813 +
6814 +       err = -ENAMETOOLONG;
6815 +       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6816 +                    "tls10(%s,%s)", auth_base->cra_name,
6817 +                    enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6818 +               goto err_drop_enc;
6819 +
6820 +       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6821 +                    "tls10(%s,%s)", auth_base->cra_driver_name,
6822 +                    enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6823 +               goto err_drop_enc;
6824 +
6825 +       inst->alg.base.cra_flags = (auth_base->cra_flags |
6826 +                                       enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
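+       /* weight the priorities so the cipher implementation dominates */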
6827 +       inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6828 +                                       auth_base->cra_priority;
6829 +       inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6830 +       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6831 +                                       enc->base.cra_alignmask;
6832 +       inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6833 +
6834 +       inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6835 +       inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6836 +       inst->alg.maxauthsize = auth->digestsize;
6837 +
6838 +       inst->alg.init = crypto_tls_init_tfm;
6839 +       inst->alg.exit = crypto_tls_exit_tfm;
6840 +
6841 +       inst->alg.setkey = crypto_tls_setkey;
6842 +       inst->alg.encrypt = crypto_tls_encrypt;
6843 +       inst->alg.decrypt = crypto_tls_decrypt;
6844 +
6845 +       inst->free = crypto_tls_free;
6846 +
6847 +       err = aead_register_instance(tmpl, inst);
6848 +       if (err)
6849 +               goto err_drop_enc;
6850 +
6851 +out:
6852 +       crypto_mod_put(auth_base);
6853 +       return err;
6854 +
6855 +err_drop_enc:
6856 +       crypto_drop_skcipher(&ctx->enc);
6857 +err_drop_auth:
6858 +       crypto_drop_ahash(&ctx->auth);
6859 +err_free_inst:
6860 +       kfree(inst);
6861 +out_put_auth:
6862 +       goto out;
6863 +}
6864 +
6865 +static struct crypto_template crypto_tls_tmpl = {
6866 +       .name = "tls10",
6867 +       .create = crypto_tls_create,
6868 +       .module = THIS_MODULE,
6869 +};
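+
+/*
+ * Illustrative usage sketch (not part of this patch): assuming hmac(sha1)
+ * and cbc(aes) are available, the template would be instantiated as an
+ * AEAD and keyed with an authenc-format key blob:
+ *
+ *     struct crypto_aead *tfm;
+ *
+ *     tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     err = crypto_aead_setkey(tfm, keyblob, keybloblen);
+ */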
6870 +
6871 +static int __init crypto_tls_module_init(void)
6872 +{
6873 +       return crypto_register_template(&crypto_tls_tmpl);
6874 +}
6875 +
6876 +static void __exit crypto_tls_module_exit(void)
6877 +{
6878 +       crypto_unregister_template(&crypto_tls_tmpl);
6879 +}
6880 +
6881 +module_init(crypto_tls_module_init);
6882 +module_exit(crypto_tls_module_exit);
6883 +
6884 +MODULE_LICENSE("GPL");
6885 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6886 --- a/drivers/crypto/caam/Kconfig
6887 +++ b/drivers/crypto/caam/Kconfig
6888 @@ -1,6 +1,11 @@
6889 +config CRYPTO_DEV_FSL_CAAM_COMMON
6890 +       tristate
6891 +
6892  config CRYPTO_DEV_FSL_CAAM
6893 -       tristate "Freescale CAAM-Multicore driver backend"
6894 +       tristate "Freescale CAAM-Multicore platform driver backend"
6895         depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6896 +       select CRYPTO_DEV_FSL_CAAM_COMMON
6897 +       select SOC_BUS
6898         help
6899           Enables the driver module for Freescale's Cryptographic Accelerator
6900           and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6901 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6902           To compile this driver as a module, choose M here: the module
6903           will be called caam.
6904  
6905 +if CRYPTO_DEV_FSL_CAAM
6906 +
6907 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6908 +       bool "Enable debug output in CAAM driver"
6909 +       help
6910 +         Selecting this will enable printing of various debug
6911 +         information in the CAAM driver.
6912 +
6913  config CRYPTO_DEV_FSL_CAAM_JR
6914         tristate "Freescale CAAM Job Ring driver backend"
6915 -       depends on CRYPTO_DEV_FSL_CAAM
6916         default y
6917         help
6918           Enables the driver module for Job Rings which are part of
6919 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6920           To compile this driver as a module, choose M here: the module
6921           will be called caam_jr.
6922  
6923 +if CRYPTO_DEV_FSL_CAAM_JR
6924 +
6925  config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6926         int "Job Ring size"
6927 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6928         range 2 9
6929         default "9"
6930         help
6931 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6932  
6933  config CRYPTO_DEV_FSL_CAAM_INTC
6934         bool "Job Ring interrupt coalescing"
6935 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6936         help
6937           Enable the Job Ring's interrupt coalescing feature.
6938  
6939 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6940  
6941  config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6942         tristate "Register algorithm implementations with the Crypto API"
6943 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6944         default y
6945         select CRYPTO_AEAD
6946         select CRYPTO_AUTHENC
6947 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6948           To compile this as a module, choose M here: the module
6949           will be called caamalg.
6950  
6951 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6952 +       tristate "Queue Interface as Crypto API backend"
6953 +       depends on FSL_SDK_DPA && NET
6954 +       default y
6955 +       select CRYPTO_AUTHENC
6956 +       select CRYPTO_BLKCIPHER
6957 +       help
6958 +         Selecting this will use the CAAM Queue Interface (QI) for sending
6959 +         and receiving crypto jobs to/from CAAM. This gives better performance
6960 +         than the job ring interface when the number of cores is greater than
6961 +         the number of job rings assigned to the kernel. The number of
6962 +         portals assigned to the kernel should also be greater than the
6963 +         number of job rings.
6964 +
6965 +         To compile this as a module, choose M here: the module
6966 +         will be called caamalg_qi.
6967 +
6968  config CRYPTO_DEV_FSL_CAAM_AHASH_API
6969         tristate "Register hash algorithm implementations with Crypto API"
6970 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6971         default y
6972         select CRYPTO_HASH
6973         help
6974 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
6975  
6976  config CRYPTO_DEV_FSL_CAAM_PKC_API
6977          tristate "Register public key cryptography implementations with Crypto API"
6978 -        depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6979          default y
6980          select CRYPTO_RSA
6981          help
6982 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
6983  
6984  config CRYPTO_DEV_FSL_CAAM_RNG_API
6985         tristate "Register caam device for hwrng API"
6986 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6987         default y
6988         select CRYPTO_RNG
6989         select HW_RANDOM
6990 @@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
6991           To compile this as a module, choose M here: the module
6992           will be called caamrng.
6993  
6994 -config CRYPTO_DEV_FSL_CAAM_IMX
6995 -       def_bool SOC_IMX6 || SOC_IMX7D
6996 -       depends on CRYPTO_DEV_FSL_CAAM
6997 +endif # CRYPTO_DEV_FSL_CAAM_JR
6998  
6999 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7000 -       bool "Enable debug output in CAAM driver"
7001 -       depends on CRYPTO_DEV_FSL_CAAM
7002 -       help
7003 -         Selecting this will enable printing of various debug
7004 -         information in the CAAM driver.
7005 +endif # CRYPTO_DEV_FSL_CAAM
7006 +
7007 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7008 +       tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7009 +       depends on FSL_MC_DPIO
7010 +       select CRYPTO_DEV_FSL_CAAM_COMMON
7011 +       select CRYPTO_BLKCIPHER
7012 +       select CRYPTO_AUTHENC
7013 +       select CRYPTO_AEAD
7014 +       ---help---
7015 +         CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7016 +         It handles DPSECI DPAA2 objects that sit on the Management Complex
7017 +         (MC) fsl-mc bus.
7018 +
7019 +         To compile this as a module, choose M here: the module
7020 +         will be called dpaa2_caam.
7021 +
7022 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7023 +       def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7024 +                     CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7025 +                     CRYPTO_DEV_FSL_DPAA2_CAAM)
7026 --- a/drivers/crypto/caam/Makefile
7027 +++ b/drivers/crypto/caam/Makefile
7028 @@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7029         ccflags-y := -DDEBUG
7030  endif
7031  
7032 +ccflags-y += -DVERSION=\"\"
7033 +
7034 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7035  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7036  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7037  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7038 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7039 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7040  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7041  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7042  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7043  
7044  caam-objs := ctrl.o
7045 -caam_jr-objs := jr.o key_gen.o error.o
7046 +caam_jr-objs := jr.o key_gen.o
7047  caam_pkc-y := caampkc.o pkc_desc.o
7048 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7049 +       ccflags-y += -DCONFIG_CAAM_QI
7050 +       caam-objs += qi.o
7051 +endif
7052 +
7053 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7054 +
7055 +dpaa2_caam-y    := caamalg_qi2.o dpseci.o
7056 --- a/drivers/crypto/caam/caamalg.c
7057 +++ b/drivers/crypto/caam/caamalg.c
7058 @@ -2,6 +2,7 @@
7059   * caam - Freescale FSL CAAM support for crypto API
7060   *
7061   * Copyright 2008-2011 Freescale Semiconductor, Inc.
7062 + * Copyright 2016 NXP
7063   *
7064   * Based on talitos crypto API driver.
7065   *
7066 @@ -53,6 +54,7 @@
7067  #include "error.h"
7068  #include "sg_sw_sec4.h"
7069  #include "key_gen.h"
7070 +#include "caamalg_desc.h"
7071  
7072  /*
7073   * crypto alg
7074 @@ -62,8 +64,6 @@
7075  #define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
7076                                          CTR_RFC3686_NONCE_SIZE + \
7077                                          SHA512_DIGEST_SIZE * 2)
7078 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7079 -#define CAAM_MAX_IV_LENGTH             16
7080  
7081  #define AEAD_DESC_JOB_IO_LEN           (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7082  #define GCM_DESC_JOB_IO_LEN            (AEAD_DESC_JOB_IO_LEN + \
7083 @@ -71,37 +71,6 @@
7084  #define AUTHENC_DESC_JOB_IO_LEN                (AEAD_DESC_JOB_IO_LEN + \
7085                                          CAAM_CMD_SZ * 5)
7086  
7087 -/* length of descriptors text */
7088 -#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
7089 -#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7090 -#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7091 -#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7092 -
7093 -/* Note: Nonce is counted in enckeylen */
7094 -#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
7095 -
7096 -#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
7097 -#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7098 -#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7099 -
7100 -#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
7101 -#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7102 -#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7103 -
7104 -#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
7105 -#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7106 -#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7107 -
7108 -#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
7109 -#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7110 -#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7111 -
7112 -#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
7113 -#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
7114 -                                        20 * CAAM_CMD_SZ)
7115 -#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
7116 -                                        15 * CAAM_CMD_SZ)
7117 -
7118  #define DESC_MAX_USED_BYTES            (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7119  #define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7120  
7121 @@ -112,47 +81,11 @@
7122  #define debug(format, arg...)
7123  #endif
7124  
7125 -#ifdef DEBUG
7126 -#include <linux/highmem.h>
7127 -
7128 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7129 -                       int prefix_type, int rowsize, int groupsize,
7130 -                       struct scatterlist *sg, size_t tlen, bool ascii,
7131 -                       bool may_sleep)
7132 -{
7133 -       struct scatterlist *it;
7134 -       void *it_page;
7135 -       size_t len;
7136 -       void *buf;
7137 -
7138 -       for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7139 -               /*
7140 -                * make sure the scatterlist's page
7141 -                * has a valid virtual memory mapping
7142 -                */
7143 -               it_page = kmap_atomic(sg_page(it));
7144 -               if (unlikely(!it_page)) {
7145 -                       printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7146 -                       return;
7147 -               }
7148 -
7149 -               buf = it_page + it->offset;
7150 -               len = min_t(size_t, tlen, it->length);
7151 -               print_hex_dump(level, prefix_str, prefix_type, rowsize,
7152 -                              groupsize, buf, len, ascii);
7153 -               tlen -= len;
7154 -
7155 -               kunmap_atomic(it_page);
7156 -       }
7157 -}
7158 -#endif
7159 -
7160  static struct list_head alg_list;
7161  
7162  struct caam_alg_entry {
7163         int class1_alg_type;
7164         int class2_alg_type;
7165 -       int alg_op;
7166         bool rfc3686;
7167         bool geniv;
7168  };
7169 @@ -163,302 +96,67 @@ struct caam_aead_alg {
7170         bool registered;
7171  };
7172  
7173 -/* Set DK bit in class 1 operation if shared */
7174 -static inline void append_dec_op1(u32 *desc, u32 type)
7175 -{
7176 -       u32 *jump_cmd, *uncond_jump_cmd;
7177 -
7178 -       /* DK bit is valid only for AES */
7179 -       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7180 -               append_operation(desc, type | OP_ALG_AS_INITFINAL |
7181 -                                OP_ALG_DECRYPT);
7182 -               return;
7183 -       }
7184 -
7185 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7186 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7187 -                        OP_ALG_DECRYPT);
7188 -       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7189 -       set_jump_tgt_here(desc, jump_cmd);
7190 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7191 -                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7192 -       set_jump_tgt_here(desc, uncond_jump_cmd);
7193 -}
7194 -
7195 -/*
7196 - * For aead functions, read payload and write payload,
7197 - * both of which are specified in req->src and req->dst
7198 - */
7199 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7200 -{
7201 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7202 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7203 -                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7204 -}
7205 -
7206 -/*
7207 - * For ablkcipher encrypt and decrypt, read from req->src and
7208 - * write to req->dst
7209 - */
7210 -static inline void ablkcipher_append_src_dst(u32 *desc)
7211 -{
7212 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7213 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7214 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7215 -                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7216 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7217 -}
7218 -
7219  /*
7220   * per-session context
7221   */
7222  struct caam_ctx {
7223 -       struct device *jrdev;
7224         u32 sh_desc_enc[DESC_MAX_USED_LEN];
7225         u32 sh_desc_dec[DESC_MAX_USED_LEN];
7226         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7227 +       u8 key[CAAM_MAX_KEY_SIZE];
7228         dma_addr_t sh_desc_enc_dma;
7229         dma_addr_t sh_desc_dec_dma;
7230         dma_addr_t sh_desc_givenc_dma;
7231 -       u32 class1_alg_type;
7232 -       u32 class2_alg_type;
7233 -       u32 alg_op;
7234 -       u8 key[CAAM_MAX_KEY_SIZE];
7235         dma_addr_t key_dma;
7236 -       unsigned int enckeylen;
7237 -       unsigned int split_key_len;
7238 -       unsigned int split_key_pad_len;
7239 +       struct device *jrdev;
7240 +       struct alginfo adata;
7241 +       struct alginfo cdata;
7242         unsigned int authsize;
7243  };
7244  
7245 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7246 -                           int keys_fit_inline, bool is_rfc3686)
7247 -{
7248 -       u32 *nonce;
7249 -       unsigned int enckeylen = ctx->enckeylen;
7250 -
7251 -       /*
7252 -        * RFC3686 specific:
7253 -        *      | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7254 -        *      | enckeylen = encryption key size + nonce size
7255 -        */
7256 -       if (is_rfc3686)
7257 -               enckeylen -= CTR_RFC3686_NONCE_SIZE;
7258 -
7259 -       if (keys_fit_inline) {
7260 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7261 -                                 ctx->split_key_len, CLASS_2 |
7262 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7263 -               append_key_as_imm(desc, (void *)ctx->key +
7264 -                                 ctx->split_key_pad_len, enckeylen,
7265 -                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7266 -       } else {
7267 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7268 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7269 -               append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7270 -                          enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7271 -       }
7272 -
7273 -       /* Load Counter into CONTEXT1 reg */
7274 -       if (is_rfc3686) {
7275 -               nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7276 -                              enckeylen);
7277 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7278 -                                  LDST_CLASS_IND_CCB |
7279 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7280 -               append_move(desc,
7281 -                           MOVE_SRC_OUTFIFO |
7282 -                           MOVE_DEST_CLASS1CTX |
7283 -                           (16 << MOVE_OFFSET_SHIFT) |
7284 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7285 -       }
7286 -}
7287 -
7288 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7289 -                                 int keys_fit_inline, bool is_rfc3686)
7290 -{
7291 -       u32 *key_jump_cmd;
7292 -
7293 -       /* Note: Context registers are saved. */
7294 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7295 -
7296 -       /* Skip if already shared */
7297 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7298 -                                  JUMP_COND_SHRD);
7299 -
7300 -       append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7301 -
7302 -       set_jump_tgt_here(desc, key_jump_cmd);
7303 -}
7304 -
7305  static int aead_null_set_sh_desc(struct crypto_aead *aead)
7306  {
7307         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7308         struct device *jrdev = ctx->jrdev;
7309 -       bool keys_fit_inline = false;
7310 -       u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7311         u32 *desc;
7312 +       int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7313 +                       ctx->adata.keylen_pad;
7314  
7315         /*
7316          * Job Descriptor and Shared Descriptors
7317          * must all fit into the 64-word Descriptor h/w Buffer
7318          */
7319 -       if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7320 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7321 -               keys_fit_inline = true;
7322 +       if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7323 +               ctx->adata.key_inline = true;
7324 +               ctx->adata.key_virt = ctx->key;
7325 +       } else {
7326 +               ctx->adata.key_inline = false;
7327 +               ctx->adata.key_dma = ctx->key_dma;
7328 +       }
7329  
7330         /* aead_encrypt shared descriptor */
7331         desc = ctx->sh_desc_enc;
7332 -
7333 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7334 -
7335 -       /* Skip if already shared */
7336 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7337 -                                  JUMP_COND_SHRD);
7338 -       if (keys_fit_inline)
7339 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7340 -                                 ctx->split_key_len, CLASS_2 |
7341 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7342 -       else
7343 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7344 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7345 -       set_jump_tgt_here(desc, key_jump_cmd);
7346 -
7347 -       /* assoclen + cryptlen = seqinlen */
7348 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7349 -
7350 -       /* Prepare to read and write cryptlen + assoclen bytes */
7351 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7352 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7353 -
7354 -       /*
7355 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7356 -        * thus need to do some magic, i.e. self-patch the descriptor
7357 -        * buffer.
7358 -        */
7359 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7360 -                                   MOVE_DEST_MATH3 |
7361 -                                   (0x6 << MOVE_LEN_SHIFT));
7362 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7363 -                                    MOVE_DEST_DESCBUF |
7364 -                                    MOVE_WAITCOMP |
7365 -                                    (0x8 << MOVE_LEN_SHIFT));
7366 -
7367 -       /* Class 2 operation */
7368 -       append_operation(desc, ctx->class2_alg_type |
7369 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7370 -
7371 -       /* Read and write cryptlen bytes */
7372 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7373 -
7374 -       set_move_tgt_here(desc, read_move_cmd);
7375 -       set_move_tgt_here(desc, write_move_cmd);
7376 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7377 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7378 -                   MOVE_AUX_LS);
7379 -
7380 -       /* Write ICV */
7381 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7382 -                        LDST_SRCDST_BYTE_CONTEXT);
7383 -
7384 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7385 -                                             desc_bytes(desc),
7386 -                                             DMA_TO_DEVICE);
7387 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7388 -               dev_err(jrdev, "unable to map shared descriptor\n");
7389 -               return -ENOMEM;
7390 -       }
7391 -#ifdef DEBUG
7392 -       print_hex_dump(KERN_ERR,
7393 -                      "aead null enc shdesc@"__stringify(__LINE__)": ",
7394 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7395 -                      desc_bytes(desc), 1);
7396 -#endif
7397 +       cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
7398 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7399 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7400  
7401         /*
7402          * Job Descriptor and Shared Descriptors
7403          * must all fit into the 64-word Descriptor h/w Buffer
7404          */
7405 -       keys_fit_inline = false;
7406 -       if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7407 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7408 -               keys_fit_inline = true;
7409 -
7410 -       desc = ctx->sh_desc_dec;
7411 +       if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7412 +               ctx->adata.key_inline = true;
7413 +               ctx->adata.key_virt = ctx->key;
7414 +       } else {
7415 +               ctx->adata.key_inline = false;
7416 +               ctx->adata.key_dma = ctx->key_dma;
7417 +       }
7418  
7419         /* aead_decrypt shared descriptor */
7420 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7421 -
7422 -       /* Skip if already shared */
7423 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7424 -                                  JUMP_COND_SHRD);
7425 -       if (keys_fit_inline)
7426 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7427 -                                 ctx->split_key_len, CLASS_2 |
7428 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7429 -       else
7430 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7431 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7432 -       set_jump_tgt_here(desc, key_jump_cmd);
7433 -
7434 -       /* Class 2 operation */
7435 -       append_operation(desc, ctx->class2_alg_type |
7436 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7437 -
7438 -       /* assoclen + cryptlen = seqoutlen */
7439 -       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7440 -
7441 -       /* Prepare to read and write cryptlen + assoclen bytes */
7442 -       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7443 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7444 -
7445 -       /*
7446 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7447 -        * thus need to do some magic, i.e. self-patch the descriptor
7448 -        * buffer.
7449 -        */
7450 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7451 -                                   MOVE_DEST_MATH2 |
7452 -                                   (0x6 << MOVE_LEN_SHIFT));
7453 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7454 -                                    MOVE_DEST_DESCBUF |
7455 -                                    MOVE_WAITCOMP |
7456 -                                    (0x8 << MOVE_LEN_SHIFT));
7457 -
7458 -       /* Read and write cryptlen bytes */
7459 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7460 -
7461 -       /*
7462 -        * Insert a NOP here, since we need at least 4 instructions between
7463 -        * code patching the descriptor buffer and the location being patched.
7464 -        */
7465 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7466 -       set_jump_tgt_here(desc, jump_cmd);
7467 -
7468 -       set_move_tgt_here(desc, read_move_cmd);
7469 -       set_move_tgt_here(desc, write_move_cmd);
7470 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7471 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7472 -                   MOVE_AUX_LS);
7473 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7474 -
7475 -       /* Load ICV */
7476 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7477 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7478 -
7479 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7480 -                                             desc_bytes(desc),
7481 -                                             DMA_TO_DEVICE);
7482 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7483 -               dev_err(jrdev, "unable to map shared descriptor\n");
7484 -               return -ENOMEM;
7485 -       }
7486 -#ifdef DEBUG
7487 -       print_hex_dump(KERN_ERR,
7488 -                      "aead null dec shdesc@"__stringify(__LINE__)": ",
7489 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7490 -                      desc_bytes(desc), 1);
7491 -#endif
7492 +       desc = ctx->sh_desc_dec;
7493 +       cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
7494 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7495 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7496  
7497         return 0;
7498  }
7499 @@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
7500         unsigned int ivsize = crypto_aead_ivsize(aead);
7501         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7502         struct device *jrdev = ctx->jrdev;
7503 -       bool keys_fit_inline;
7504 -       u32 geniv, moveiv;
7505         u32 ctx1_iv_off = 0;
7506 -       u32 *desc;
7507 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7508 +       u32 *desc, *nonce = NULL;
7509 +       u32 inl_mask;
7510 +       unsigned int data_len[2];
7511 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7512                                OP_ALG_AAI_CTR_MOD128);
7513         const bool is_rfc3686 = alg->caam.rfc3686;
7514  
7515 @@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
7516                 return 0;
7517  
7518         /* NULL encryption / decryption */
7519 -       if (!ctx->enckeylen)
7520 +       if (!ctx->cdata.keylen)
7521                 return aead_null_set_sh_desc(aead);
7522  
7523         /*
7524 @@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
7525          * RFC3686 specific:
7526          *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7527          */
7528 -       if (is_rfc3686)
7529 +       if (is_rfc3686) {
7530                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7531 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7532 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7533 +       }
7534 +
7535 +       data_len[0] = ctx->adata.keylen_pad;
7536 +       data_len[1] = ctx->cdata.keylen;
7537  
7538         if (alg->caam.geniv)
7539                 goto skip_enc;
7540 @@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
7541          * Job Descriptor and Shared Descriptors
7542          * must all fit into the 64-word Descriptor h/w Buffer
7543          */
7544 -       keys_fit_inline = false;
7545 -       if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7546 -           ctx->split_key_pad_len + ctx->enckeylen +
7547 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7548 -           CAAM_DESC_BYTES_MAX)
7549 -               keys_fit_inline = true;
7550 -
7551 -       /* aead_encrypt shared descriptor */
7552 -       desc = ctx->sh_desc_enc;
7553 -
7554 -       /* Note: Context registers are saved. */
7555 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7556 -
7557 -       /* Class 2 operation */
7558 -       append_operation(desc, ctx->class2_alg_type |
7559 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7560 +       if (desc_inline_query(DESC_AEAD_ENC_LEN +
7561 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7562 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7563 +                             ARRAY_SIZE(data_len)) < 0)
7564 +               return -EINVAL;
7565  
7566 -       /* Read and write assoclen bytes */
7567 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7568 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
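+       /* bit 0 of inl_mask: the auth key fits inline; bit 1: the cipher key does */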
7569 +       if (inl_mask & 1)
7570 +               ctx->adata.key_virt = ctx->key;
7571 +       else
7572 +               ctx->adata.key_dma = ctx->key_dma;
7573  
7574 -       /* Skip assoc data */
7575 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7576 +       if (inl_mask & 2)
7577 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7578 +       else
7579 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7580  
7581 -       /* read assoc before reading payload */
7582 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7583 -                                     FIFOLDST_VLF);
7584 +       ctx->adata.key_inline = !!(inl_mask & 1);
7585 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7586  
7587 -       /* Load Counter into CONTEXT1 reg */
7588 -       if (is_rfc3686)
7589 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7590 -                                    LDST_SRCDST_BYTE_CONTEXT |
7591 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7592 -                                     LDST_OFFSET_SHIFT));
7593 -
7594 -       /* Class 1 operation */
7595 -       append_operation(desc, ctx->class1_alg_type |
7596 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7597 -
7598 -       /* Read and write cryptlen bytes */
7599 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7600 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7601 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7602 -
7603 -       /* Write ICV */
7604 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7605 -                        LDST_SRCDST_BYTE_CONTEXT);
7606 -
7607 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7608 -                                             desc_bytes(desc),
7609 -                                             DMA_TO_DEVICE);
7610 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7611 -               dev_err(jrdev, "unable to map shared descriptor\n");
7612 -               return -ENOMEM;
7613 -       }
7614 -#ifdef DEBUG
7615 -       print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7616 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7617 -                      desc_bytes(desc), 1);
7618 -#endif
7619 +       /* aead_encrypt shared descriptor */
7620 +       desc = ctx->sh_desc_enc;
7621 +       cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7622 +                              ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7623 +                              false);
7624 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7625 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7626  
7627  skip_enc:
7628         /*
7629          * Job Descriptor and Shared Descriptors
7630          * must all fit into the 64-word Descriptor h/w Buffer
7631          */
7632 -       keys_fit_inline = false;
7633 -       if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7634 -           ctx->split_key_pad_len + ctx->enckeylen +
7635 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7636 -           CAAM_DESC_BYTES_MAX)
7637 -               keys_fit_inline = true;
7638 -
7639 -       /* aead_decrypt shared descriptor */
7640 -       desc = ctx->sh_desc_dec;
7641 -
7642 -       /* Note: Context registers are saved. */
7643 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7644 -
7645 -       /* Class 2 operation */
7646 -       append_operation(desc, ctx->class2_alg_type |
7647 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7648 +       if (desc_inline_query(DESC_AEAD_DEC_LEN +
7649 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7650 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7651 +                             ARRAY_SIZE(data_len)) < 0)
7652 +               return -EINVAL;
7653  
7654 -       /* Read and write assoclen bytes */
7655 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7656 -       if (alg->caam.geniv)
7657 -               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7658 +       if (inl_mask & 1)
7659 +               ctx->adata.key_virt = ctx->key;
7660         else
7661 -               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7662 -
7663 -       /* Skip assoc data */
7664 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7665 -
7666 -       /* read assoc before reading payload */
7667 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7668 -                            KEY_VLF);
7669 -
7670 -       if (alg->caam.geniv) {
7671 -               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7672 -                               LDST_SRCDST_BYTE_CONTEXT |
7673 -                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
7674 -               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7675 -                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7676 -       }
7677 -
7678 -       /* Load Counter into CONTEXT1 reg */
7679 -       if (is_rfc3686)
7680 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7681 -                                    LDST_SRCDST_BYTE_CONTEXT |
7682 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7683 -                                     LDST_OFFSET_SHIFT));
7684 +               ctx->adata.key_dma = ctx->key_dma;
7685  
7686 -       /* Choose operation */
7687 -       if (ctr_mode)
7688 -               append_operation(desc, ctx->class1_alg_type |
7689 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7690 +       if (inl_mask & 2)
7691 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7692         else
7693 -               append_dec_op1(desc, ctx->class1_alg_type);
7694 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7695  
7696 -       /* Read and write cryptlen bytes */
7697 -       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7698 -       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7699 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7700 -
7701 -       /* Load ICV */
7702 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7703 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7704 +       ctx->adata.key_inline = !!(inl_mask & 1);
7705 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7706  
7707 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7708 -                                             desc_bytes(desc),
7709 -                                             DMA_TO_DEVICE);
7710 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7711 -               dev_err(jrdev, "unable to map shared descriptor\n");
7712 -               return -ENOMEM;
7713 -       }
7714 -#ifdef DEBUG
7715 -       print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7716 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7717 -                      desc_bytes(desc), 1);
7718 -#endif
7719 +       /* aead_decrypt shared descriptor */
7720 +       desc = ctx->sh_desc_dec;
7721 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7722 +                              ctx->authsize, alg->caam.geniv, is_rfc3686,
7723 +                              nonce, ctx1_iv_off, false);
7724 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7725 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7726  
7727         if (!alg->caam.geniv)
7728                 goto skip_givenc;
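
The decap hunk above replaces the old open-coded keys_fit_inline test with desc_inline_query(), which returns a bitmask (inl_mask) where bit 0 covers the split authentication key (adata) and bit 1 the cipher key (cdata). The standalone C model below is only a sketch of the greedy fits-in-remaining-budget idea behind such a mask; inline_query(), its constants, and its budget arithmetic are illustrative, not the kernel helper.

    #include <stdio.h>

    #define CAAM_DESC_BYTES_MAX 256            /* 64 words x 4 bytes */

    /* Toy stand-in for desc_inline_query(): greedily inline each key
     * while the remaining descriptor budget allows it.  Bit i of *mask
     * is set when key_len[i] fits inline. */
    static int inline_query(int fixed_len, const int *key_len, int count,
                            unsigned int *mask)
    {
            int budget = CAAM_DESC_BYTES_MAX - fixed_len;
            int i;

            if (budget < 0)
                    return -1;

            *mask = 0;
            for (i = 0; i < count; i++) {
                    if (key_len[i] <= budget) {
                            *mask |= 1u << i;
                            budget -= key_len[i];
                    }
            }
            return 0;
    }

    int main(void)
    {
            int data_len[2] = { 64, 32 };      /* split auth key, cipher key */
            unsigned int inl_mask;

            if (!inline_query(180, data_len, 2, &inl_mask))
                    printf("inl_mask = %#x\n", inl_mask); /* bit 0: adata, bit 1: cdata */
            return 0;
    }
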
7729 @@ -655,107 +277,32 @@ skip_enc:
7730          * Job Descriptor and Shared Descriptors
7731          * must all fit into the 64-word Descriptor h/w Buffer
7732          */
7733 -       keys_fit_inline = false;
7734 -       if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7735 -           ctx->split_key_pad_len + ctx->enckeylen +
7736 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7737 -           CAAM_DESC_BYTES_MAX)
7738 -               keys_fit_inline = true;
7739 -
7740 -       /* aead_givencrypt shared descriptor */
7741 -       desc = ctx->sh_desc_enc;
7742 -
7743 -       /* Note: Context registers are saved. */
7744 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7745 +       if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7746 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7747 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7748 +                             ARRAY_SIZE(data_len)) < 0)
7749 +               return -EINVAL;
7750  
7751 -       if (is_rfc3686)
7752 -               goto copy_iv;
7753 +       if (inl_mask & 1)
7754 +               ctx->adata.key_virt = ctx->key;
7755 +       else
7756 +               ctx->adata.key_dma = ctx->key_dma;
7757  
7758 -       /* Generate IV */
7759 -       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7760 -               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7761 -               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7762 -       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7763 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7764 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7765 -       append_move(desc, MOVE_WAITCOMP |
7766 -                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7767 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7768 -                   (ivsize << MOVE_LEN_SHIFT));
7769 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7770 -
7771 -copy_iv:
7772 -       /* Copy IV to class 1 context */
7773 -       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7774 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7775 -                   (ivsize << MOVE_LEN_SHIFT));
7776 -
7777 -       /* Return to encryption */
7778 -       append_operation(desc, ctx->class2_alg_type |
7779 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7780 -
7781 -       /* Read and write assoclen bytes */
7782 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7783 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7784 -
7785 -       /* ivsize + cryptlen = seqoutlen - authsize */
7786 -       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7787 -
7788 -       /* Skip assoc data */
7789 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7790 -
7791 -       /* read assoc before reading payload */
7792 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7793 -                            KEY_VLF);
7794 -
7795 -       /* Copy iv from outfifo to class 2 fifo */
7796 -       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7797 -                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7798 -       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7799 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7800 -       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7801 -                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7802 +       if (inl_mask & 2)
7803 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7804 +       else
7805 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7806  
7807 -       /* Load Counter into CONTEXT1 reg */
7808 -       if (is_rfc3686)
7809 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7810 -                                    LDST_SRCDST_BYTE_CONTEXT |
7811 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7812 -                                     LDST_OFFSET_SHIFT));
7813 -
7814 -       /* Class 1 operation */
7815 -       append_operation(desc, ctx->class1_alg_type |
7816 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7817 -
7818 -       /* Will write ivsize + cryptlen */
7819 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7820 -
7821 -       /* No need to reload iv */
7822 -       append_seq_fifo_load(desc, ivsize,
7823 -                            FIFOLD_CLASS_SKIP);
7824 -
7825 -       /* Will read cryptlen */
7826 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7827 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7828 -                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7829 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7830 -
7831 -       /* Write ICV */
7832 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7833 -                        LDST_SRCDST_BYTE_CONTEXT);
7834 +       ctx->adata.key_inline = !!(inl_mask & 1);
7835 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7836  
7837 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7838 -                                             desc_bytes(desc),
7839 -                                             DMA_TO_DEVICE);
7840 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7841 -               dev_err(jrdev, "unable to map shared descriptor\n");
7842 -               return -ENOMEM;
7843 -       }
7844 -#ifdef DEBUG
7845 -       print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7846 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7847 -                      desc_bytes(desc), 1);
7848 -#endif
7849 +       /* aead_givencrypt shared descriptor */
7850 +       desc = ctx->sh_desc_enc;
7851 +       cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7852 +                                 ctx->authsize, is_rfc3686, nonce,
7853 +                                 ctx1_iv_off, false);
7854 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7855 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7856  
7857  skip_givenc:
7858         return 0;
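
Note the pattern that recurs through all the rewritten set_sh_desc paths: the shared-descriptor buffers are DMA-mapped once up front, rebuilt in place by the cnstr_shdsc_*() constructors, and then merely flushed with dma_sync_single_for_device(), instead of the old dma_map_single()/dma_mapping_error() round trip on every rebuild. A userspace analogy of that map-once/sync-often strategy (a toy model; the paired "cpu view"/"device view" buffers are purely illustrative):

    #include <stdio.h>
    #include <string.h>

    #define DESC_WORDS 64

    /* cpu_copy is what the driver writes; dev_copy is what the "device"
     * observes after a sync.  The pairing of the two buffers (the
     * "mapping") is set up once and never torn down between rebuilds. */
    struct toy_ctx {
            unsigned int cpu_copy[DESC_WORDS];
            unsigned int dev_copy[DESC_WORDS];
    };

    static void toy_sync_for_device(struct toy_ctx *c, size_t bytes)
    {
            memcpy(c->dev_copy, c->cpu_copy, bytes);   /* "flush" to device */
    }

    static void rebuild_desc(struct toy_ctx *c, unsigned int opcode)
    {
            c->cpu_copy[0] = opcode;                   /* cnstr_shdsc_*() stand-in */
            toy_sync_for_device(c, sizeof(c->cpu_copy[0]));
    }

    int main(void)
    {
            struct toy_ctx c = { { 0 }, { 0 } };

            rebuild_desc(&c, 0x1);                     /* e.g. after setkey()      */
            rebuild_desc(&c, 0x2);                     /* e.g. after setauthsize() */
            printf("device now sees opcode %#x\n", c.dev_copy[0]);
            return 0;
    }
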
7859 @@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
7860  {
7861         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7862         struct device *jrdev = ctx->jrdev;
7863 -       bool keys_fit_inline = false;
7864 -       u32 *key_jump_cmd, *zero_payload_jump_cmd,
7865 -           *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7866 +       unsigned int ivsize = crypto_aead_ivsize(aead);
7867         u32 *desc;
7868 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7869 +                       ctx->cdata.keylen;
7870  
7871 -       if (!ctx->enckeylen || !ctx->authsize)
7872 +       if (!ctx->cdata.keylen || !ctx->authsize)
7873                 return 0;
7874  
7875         /*
7876 @@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
7877          * Job Descriptor and Shared Descriptor
7878          * must fit into the 64-word Descriptor h/w Buffer
7879          */
7880 -       if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7881 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7882 -               keys_fit_inline = true;
7883 +       if (rem_bytes >= DESC_GCM_ENC_LEN) {
7884 +               ctx->cdata.key_inline = true;
7885 +               ctx->cdata.key_virt = ctx->key;
7886 +       } else {
7887 +               ctx->cdata.key_inline = false;
7888 +               ctx->cdata.key_dma = ctx->key_dma;
7889 +       }
7890  
7891         desc = ctx->sh_desc_enc;
7892 -
7893 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7894 -
7895 -       /* skip key loading if they are loaded due to sharing */
7896 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7897 -                                  JUMP_COND_SHRD | JUMP_COND_SELF);
7898 -       if (keys_fit_inline)
7899 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7900 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7901 -       else
7902 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
7903 -                          CLASS_1 | KEY_DEST_CLASS_REG);
7904 -       set_jump_tgt_here(desc, key_jump_cmd);
7905 -
7906 -       /* class 1 operation */
7907 -       append_operation(desc, ctx->class1_alg_type |
7908 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7909 -
7910 -       /* if assoclen + cryptlen is ZERO, skip to ICV write */
7911 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7912 -       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7913 -                                                JUMP_COND_MATH_Z);
7914 -
7915 -       /* if assoclen is ZERO, skip reading the assoc data */
7916 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7917 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7918 -                                                JUMP_COND_MATH_Z);
7919 -
7920 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7921 -
7922 -       /* skip assoc data */
7923 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7924 -
7925 -       /* cryptlen = seqinlen - assoclen */
7926 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7927 -
7928 -       /* if cryptlen is ZERO jump to zero-payload commands */
7929 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7930 -                                           JUMP_COND_MATH_Z);
7931 -
7932 -       /* read assoc data */
7933 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7934 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7935 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7936 -
7937 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7938 -
7939 -       /* write encrypted data */
7940 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7941 -
7942 -       /* read payload data */
7943 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7944 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7945 -
7946 -       /* jump the zero-payload commands */
7947 -       append_jump(desc, JUMP_TEST_ALL | 2);
7948 -
7949 -       /* zero-payload commands */
7950 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
7951 -
7952 -       /* read assoc data */
7953 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7954 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7955 -
7956 -       /* There is no input data */
7957 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7958 -
7959 -       /* write ICV */
7960 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7961 -                        LDST_SRCDST_BYTE_CONTEXT);
7962 -
7963 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7964 -                                             desc_bytes(desc),
7965 -                                             DMA_TO_DEVICE);
7966 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7967 -               dev_err(jrdev, "unable to map shared descriptor\n");
7968 -               return -ENOMEM;
7969 -       }
7970 -#ifdef DEBUG
7971 -       print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
7972 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7973 -                      desc_bytes(desc), 1);
7974 -#endif
7975 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
7976 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7977 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7978  
7979         /*
7980          * Job Descriptor and Shared Descriptors
7981          * must all fit into the 64-word Descriptor h/w Buffer
7982          */
7983 -       keys_fit_inline = false;
7984 -       if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
7985 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7986 -               keys_fit_inline = true;
7987 +       if (rem_bytes >= DESC_GCM_DEC_LEN) {
7988 +               ctx->cdata.key_inline = true;
7989 +               ctx->cdata.key_virt = ctx->key;
7990 +       } else {
7991 +               ctx->cdata.key_inline = false;
7992 +               ctx->cdata.key_dma = ctx->key_dma;
7993 +       }
7994  
7995         desc = ctx->sh_desc_dec;
7996 -
7997 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7998 -
7999 -       /* skip key loading if they are loaded due to sharing */
8000 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8001 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD |
8002 -                                  JUMP_COND_SELF);
8003 -       if (keys_fit_inline)
8004 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8005 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8006 -       else
8007 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8008 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8009 -       set_jump_tgt_here(desc, key_jump_cmd);
8010 -
8011 -       /* class 1 operation */
8012 -       append_operation(desc, ctx->class1_alg_type |
8013 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8014 -
8015 -       /* if assoclen is ZERO, skip reading the assoc data */
8016 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8017 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8018 -                                                JUMP_COND_MATH_Z);
8019 -
8020 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8021 -
8022 -       /* skip assoc data */
8023 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8024 -
8025 -       /* read assoc data */
8026 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8027 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8028 -
8029 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8030 -
8031 -       /* cryptlen = seqoutlen - assoclen */
8032 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8033 -
8034 -       /* jump to zero-payload command if cryptlen is zero */
8035 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8036 -                                           JUMP_COND_MATH_Z);
8037 -
8038 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8039 -
8040 -       /* store encrypted data */
8041 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8042 -
8043 -       /* read payload data */
8044 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8045 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8046 -
8047 -       /* zero-payload command */
8048 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
8049 -
8050 -       /* read ICV */
8051 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8052 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8053 -
8054 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8055 -                                             desc_bytes(desc),
8056 -                                             DMA_TO_DEVICE);
8057 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8058 -               dev_err(jrdev, "unable to map shared descriptor\n");
8059 -               return -ENOMEM;
8060 -       }
8061 -#ifdef DEBUG
8062 -       print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8063 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8064 -                      desc_bytes(desc), 1);
8065 -#endif
8066 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8067 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8068 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8069  
8070         return 0;
8071  }
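
The rem_bytes test above makes the key-placement decision once per descriptor: whatever buffer space is left after the fixed job I/O commands and the key itself must still hold the encap or decap command sequence, otherwise the key is referenced through key_dma instead of being inlined. A worked example (all lengths are assumptions for illustration, not the kernel's actual constants):

    #include <stdbool.h>
    #include <stdio.h>

    #define CAAM_DESC_BYTES_MAX  256   /* 64 words x 4 bytes */
    #define GCM_DESC_JOB_IO_LEN   60   /* assumed overhead, for illustration */
    #define DESC_GCM_ENC_LEN     120   /* assumed encap length, for illustration */

    int main(void)
    {
            int keylen = 32;           /* AES-256 */
            int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - keylen;
            bool key_inline = rem_bytes >= DESC_GCM_ENC_LEN;

            printf("rem_bytes = %d -> key %s\n", rem_bytes,
                   key_inline ? "inlined" : "referenced via key_dma");
            return 0;
    }
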
8072 @@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
8073  {
8074         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8075         struct device *jrdev = ctx->jrdev;
8076 -       bool keys_fit_inline = false;
8077 -       u32 *key_jump_cmd;
8078 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8079         u32 *desc;
8080 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8081 +                       ctx->cdata.keylen;
8082  
8083 -       if (!ctx->enckeylen || !ctx->authsize)
8084 +       if (!ctx->cdata.keylen || !ctx->authsize)
8085                 return 0;
8086  
8087         /*
8088 @@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
8089          * Job Descriptor and Shared Descriptor
8090          * must fit into the 64-word Descriptor h/w Buffer
8091          */
8092 -       if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8093 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8094 -               keys_fit_inline = true;
8095 +       if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8096 +               ctx->cdata.key_inline = true;
8097 +               ctx->cdata.key_virt = ctx->key;
8098 +       } else {
8099 +               ctx->cdata.key_inline = false;
8100 +               ctx->cdata.key_dma = ctx->key_dma;
8101 +       }
8102  
8103         desc = ctx->sh_desc_enc;
8104 -
8105 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8106 -
8107 -       /* Skip key loading if it is loaded due to sharing */
8108 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8109 -                                  JUMP_COND_SHRD);
8110 -       if (keys_fit_inline)
8111 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8112 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8113 -       else
8114 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8115 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8116 -       set_jump_tgt_here(desc, key_jump_cmd);
8117 -
8118 -       /* Class 1 operation */
8119 -       append_operation(desc, ctx->class1_alg_type |
8120 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8121 -
8122 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8123 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8124 -
8125 -       /* Read assoc data */
8126 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8127 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8128 -
8129 -       /* Skip IV */
8130 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8131 -
8132 -       /* Will read cryptlen bytes */
8133 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8134 -
8135 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8136 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8137 -
8138 -       /* Skip assoc data */
8139 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8140 -
8141 -       /* cryptlen = seqoutlen - assoclen */
8142 -       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8143 -
8144 -       /* Write encrypted data */
8145 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8146 -
8147 -       /* Read payload data */
8148 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8149 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8150 -
8151 -       /* Write ICV */
8152 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8153 -                        LDST_SRCDST_BYTE_CONTEXT);
8154 -
8155 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8156 -                                             desc_bytes(desc),
8157 -                                             DMA_TO_DEVICE);
8158 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8159 -               dev_err(jrdev, "unable to map shared descriptor\n");
8160 -               return -ENOMEM;
8161 -       }
8162 -#ifdef DEBUG
8163 -       print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8164 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8165 -                      desc_bytes(desc), 1);
8166 -#endif
8167 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8168 +                                 false);
8169 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8170 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8171  
8172         /*
8173          * Job Descriptor and Shared Descriptors
8174          * must all fit into the 64-word Descriptor h/w Buffer
8175          */
8176 -       keys_fit_inline = false;
8177 -       if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8178 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8179 -               keys_fit_inline = true;
8180 +       if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8181 +               ctx->cdata.key_inline = true;
8182 +               ctx->cdata.key_virt = ctx->key;
8183 +       } else {
8184 +               ctx->cdata.key_inline = false;
8185 +               ctx->cdata.key_dma = ctx->key_dma;
8186 +       }
8187  
8188         desc = ctx->sh_desc_dec;
8189 -
8190 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8191 -
8192 -       /* Skip key loading if it is loaded due to sharing */
8193 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8194 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8195 -       if (keys_fit_inline)
8196 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8197 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8198 -       else
8199 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8200 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8201 -       set_jump_tgt_here(desc, key_jump_cmd);
8202 -
8203 -       /* Class 1 operation */
8204 -       append_operation(desc, ctx->class1_alg_type |
8205 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8206 -
8207 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8208 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8209 -
8210 -       /* Read assoc data */
8211 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8212 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8213 -
8214 -       /* Skip IV */
8215 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8216 -
8217 -       /* Will read cryptlen bytes */
8218 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8219 -
8220 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8221 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8222 -
8223 -       /* Skip assoc data */
8224 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8225 -
8226 -       /* Will write cryptlen bytes */
8227 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8228 -
8229 -       /* Store payload data */
8230 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8231 -
8232 -       /* Read encrypted data */
8233 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8234 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8235 -
8236 -       /* Read ICV */
8237 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8238 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8239 -
8240 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8241 -                                             desc_bytes(desc),
8242 -                                             DMA_TO_DEVICE);
8243 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8244 -               dev_err(jrdev, "unable to map shared descriptor\n");
8245 -               return -ENOMEM;
8246 -       }
8247 -#ifdef DEBUG
8248 -       print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8249 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8250 -                      desc_bytes(desc), 1);
8251 -#endif
8252 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8253 +                                 false);
8254 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8255 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8256  
8257         return 0;
8258  }
8259 @@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
8260  {
8261         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8262         struct device *jrdev = ctx->jrdev;
8263 -       bool keys_fit_inline = false;
8264 -       u32 *key_jump_cmd;
8265 -       u32 *read_move_cmd, *write_move_cmd;
8266 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8267         u32 *desc;
8268 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8269 +                       ctx->cdata.keylen;
8270  
8271 -       if (!ctx->enckeylen || !ctx->authsize)
8272 +       if (!ctx->cdata.keylen || !ctx->authsize)
8273                 return 0;
8274  
8275         /*
8276 @@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
8277          * Job Descriptor and Shared Descriptor
8278          * must fit into the 64-word Descriptor h/w Buffer
8279          */
8280 -       if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8281 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8282 -               keys_fit_inline = true;
8283 +       if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8284 +               ctx->cdata.key_inline = true;
8285 +               ctx->cdata.key_virt = ctx->key;
8286 +       } else {
8287 +               ctx->cdata.key_inline = false;
8288 +               ctx->cdata.key_dma = ctx->key_dma;
8289 +       }
8290  
8291         desc = ctx->sh_desc_enc;
8292 -
8293 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8294 -
8295 -       /* Skip key loading if it is loaded due to sharing */
8296 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8297 -                                  JUMP_COND_SHRD);
8298 -       if (keys_fit_inline)
8299 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8300 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8301 -       else
8302 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8303 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8304 -       set_jump_tgt_here(desc, key_jump_cmd);
8305 -
8306 -       /* Class 1 operation */
8307 -       append_operation(desc, ctx->class1_alg_type |
8308 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8309 -
8310 -       /* assoclen + cryptlen = seqinlen */
8311 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8312 -
8313 -       /*
8314 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8315 -        * thus need to do some magic, i.e. self-patch the descriptor
8316 -        * buffer.
8317 -        */
8318 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8319 -                                   (0x6 << MOVE_LEN_SHIFT));
8320 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8321 -                                    (0x8 << MOVE_LEN_SHIFT));
8322 -
8323 -       /* Will read assoclen + cryptlen bytes */
8324 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8325 -
8326 -       /* Will write assoclen + cryptlen bytes */
8327 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8328 -
8329 -       /* Read and write assoclen + cryptlen bytes */
8330 -       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8331 -
8332 -       set_move_tgt_here(desc, read_move_cmd);
8333 -       set_move_tgt_here(desc, write_move_cmd);
8334 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8335 -       /* Move payload data to OFIFO */
8336 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8337 -
8338 -       /* Write ICV */
8339 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8340 -                        LDST_SRCDST_BYTE_CONTEXT);
8341 -
8342 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8343 -                                             desc_bytes(desc),
8344 -                                             DMA_TO_DEVICE);
8345 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8346 -               dev_err(jrdev, "unable to map shared descriptor\n");
8347 -               return -ENOMEM;
8348 -       }
8349 -#ifdef DEBUG
8350 -       print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8351 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8352 -                      desc_bytes(desc), 1);
8353 -#endif
8354 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8355 +                                 false);
8356 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8357 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8358  
8359         /*
8360          * Job Descriptor and Shared Descriptors
8361          * must all fit into the 64-word Descriptor h/w Buffer
8362          */
8363 -       keys_fit_inline = false;
8364 -       if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8365 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8366 -               keys_fit_inline = true;
8367 +       if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8368 +               ctx->cdata.key_inline = true;
8369 +               ctx->cdata.key_virt = ctx->key;
8370 +       } else {
8371 +               ctx->cdata.key_inline = false;
8372 +               ctx->cdata.key_dma = ctx->key_dma;
8373 +       }
8374  
8375         desc = ctx->sh_desc_dec;
8376 -
8377 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8378 -
8379 -       /* Skip key loading if it is loaded due to sharing */
8380 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8381 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8382 -       if (keys_fit_inline)
8383 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8384 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8385 -       else
8386 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8387 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8388 -       set_jump_tgt_here(desc, key_jump_cmd);
8389 -
8390 -       /* Class 1 operation */
8391 -       append_operation(desc, ctx->class1_alg_type |
8392 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8393 -
8394 -       /* assoclen + cryptlen = seqoutlen */
8395 -       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8396 -
8397 -       /*
8398 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8399 -        * thus need to do some magic, i.e. self-patch the descriptor
8400 -        * buffer.
8401 -        */
8402 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8403 -                                   (0x6 << MOVE_LEN_SHIFT));
8404 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8405 -                                    (0x8 << MOVE_LEN_SHIFT));
8406 -
8407 -       /* Will read assoclen + cryptlen bytes */
8408 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8409 -
8410 -       /* Will write assoclen + cryptlen bytes */
8411 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8412 -
8413 -       /* Store payload data */
8414 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8415 -
8416 -       /* In-snoop assoclen + cryptlen data */
8417 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8418 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8419 -
8420 -       set_move_tgt_here(desc, read_move_cmd);
8421 -       set_move_tgt_here(desc, write_move_cmd);
8422 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8423 -       /* Move payload data to OFIFO */
8424 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8425 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8426 -
8427 -       /* Read ICV */
8428 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8429 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8430 -
8431 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8432 -                                             desc_bytes(desc),
8433 -                                             DMA_TO_DEVICE);
8434 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8435 -               dev_err(jrdev, "unable to map shared descriptor\n");
8436 -               return -ENOMEM;
8437 -       }
8438 -#ifdef DEBUG
8439 -       print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8440 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8441 -                      desc_bytes(desc), 1);
8442 -#endif
8443 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8444 +                                 false);
8445 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8446 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8447  
8448         return 0;
8449  }
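
The rfc4543 hunks also fold away the read_move_cmd/write_move_cmd pair flagged by the removed comment: since the MOVE_LEN opcode is missing on some SEC revisions, the descriptor self-patches its own buffer, with set_move_tgt_here() filling in the command offsets once they are known. A toy build-time model of that record-then-patch step (the command encoding and offsets are invented for illustration):

    #include <stdio.h>

    #define MAX_CMDS 16

    static unsigned int desc[MAX_CMDS];
    static unsigned int ncmds;

    /* append a command word, returning a handle so it can be patched later */
    static unsigned int *append_cmd(unsigned int cmd)
    {
            desc[ncmds] = cmd;
            return &desc[ncmds++];
    }

    /* patch the low byte of an earlier command with the current offset */
    static void set_move_tgt(unsigned int *cmd)
    {
            *cmd = (*cmd & ~0xffu) | ncmds;
    }

    int main(void)
    {
            unsigned int *read_move = append_cmd(0x11110000); /* target unknown yet */

            append_cmd(0x22220000);    /* intervening commands */
            append_cmd(0x33330000);
            set_move_tgt(read_move);   /* the offset (3) is known only now */
            printf("patched command = %#x\n", *read_move);
            return 0;
    }
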
8450 @@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
8451         return 0;
8452  }
8453  
8454 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8455 -                             u32 authkeylen)
8456 -{
8457 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8458 -                              ctx->split_key_pad_len, key_in, authkeylen,
8459 -                              ctx->alg_op);
8460 -}
8461 -
8462  static int aead_setkey(struct crypto_aead *aead,
8463                                const u8 *key, unsigned int keylen)
8464  {
8465 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8466 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8467         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8468         struct device *jrdev = ctx->jrdev;
8469         struct crypto_authenc_keys keys;
8470 @@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
8471         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8472                 goto badkey;
8473  
8474 -       /* Pick class 2 key length from algorithm submask */
8475 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8476 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
8477 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8478 -
8479 -       if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8480 -               goto badkey;
8481 -
8482  #ifdef DEBUG
8483         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8484                keys.authkeylen + keys.enckeylen, keys.enckeylen,
8485                keys.authkeylen);
8486 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8487 -              ctx->split_key_len, ctx->split_key_pad_len);
8488         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8489                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8490  #endif
8491  
8492 -       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8493 +       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8494 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
8495 +                           keys.enckeylen);
8496         if (ret) {
8497                 goto badkey;
8498         }
8499  
8500         /* append encryption key to auth split key */
8501 -       memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8502 -
8503 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8504 -                                     keys.enckeylen, DMA_TO_DEVICE);
8505 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8506 -               dev_err(jrdev, "unable to map key i/o memory\n");
8507 -               return -ENOMEM;
8508 -       }
8509 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8510 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8511 +                                  keys.enckeylen, DMA_TO_DEVICE);
8512  #ifdef DEBUG
8513         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8514                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8515 -                      ctx->split_key_pad_len + keys.enckeylen, 1);
8516 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
8517  #endif
8518 -
8519 -       ctx->enckeylen = keys.enckeylen;
8520 -
8521 -       ret = aead_set_sh_desc(aead);
8522 -       if (ret) {
8523 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8524 -                                keys.enckeylen, DMA_TO_DEVICE);
8525 -       }
8526 -
8527 -       return ret;
8528 +       ctx->cdata.keylen = keys.enckeylen;
8529 +       return aead_set_sh_desc(aead);
8530  badkey:
8531         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8532         return -EINVAL;
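
aead_setkey() above keeps the driver's one-buffer key layout: gen_split_key() writes the padded MDHA split key at the start of ctx->key, and the raw encryption key is copied right behind it at offset ctx->adata.keylen_pad, so the single pre-existing DMA mapping can be synced for both at once. A sketch of that layout (buffer contents and sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define CAAM_MAX_KEY_SIZE 128

    int main(void)
    {
            unsigned char key[CAAM_MAX_KEY_SIZE];
            unsigned int keylen_pad = 64;   /* assumed padded split-key size */
            unsigned char enckey[16] = "0123456789abcdef";

            memset(key, 0xAA, keylen_pad);               /* split auth key stand-in */
            memcpy(key + keylen_pad, enckey, sizeof(enckey));

            printf("split auth key: [0..%u), enc key: [%u..%u)\n",
                   keylen_pad, keylen_pad,
                   keylen_pad + (unsigned int)sizeof(enckey));
            return 0;
    }
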
8533 @@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
8534  {
8535         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8536         struct device *jrdev = ctx->jrdev;
8537 -       int ret = 0;
8538  
8539  #ifdef DEBUG
8540         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8541 @@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
8542  #endif
8543  
8544         memcpy(ctx->key, key, keylen);
8545 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8546 -                                     DMA_TO_DEVICE);
8547 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8548 -               dev_err(jrdev, "unable to map key i/o memory\n");
8549 -               return -ENOMEM;
8550 -       }
8551 -       ctx->enckeylen = keylen;
8552 -
8553 -       ret = gcm_set_sh_desc(aead);
8554 -       if (ret) {
8555 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8556 -                                DMA_TO_DEVICE);
8557 -       }
8558 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8559 +       ctx->cdata.keylen = keylen;
8560  
8561 -       return ret;
8562 +       return gcm_set_sh_desc(aead);
8563  }
8564  
8565  static int rfc4106_setkey(struct crypto_aead *aead,
8566 @@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
8567  {
8568         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8569         struct device *jrdev = ctx->jrdev;
8570 -       int ret = 0;
8571  
8572         if (keylen < 4)
8573                 return -EINVAL;
8574 @@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
8575          * The last four bytes of the key material are used as the salt value
8576          * in the nonce. Update the AES key length.
8577          */
8578 -       ctx->enckeylen = keylen - 4;
8579 -
8580 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8581 -                                     DMA_TO_DEVICE);
8582 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8583 -               dev_err(jrdev, "unable to map key i/o memory\n");
8584 -               return -ENOMEM;
8585 -       }
8586 -
8587 -       ret = rfc4106_set_sh_desc(aead);
8588 -       if (ret) {
8589 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8590 -                                DMA_TO_DEVICE);
8591 -       }
8592 -
8593 -       return ret;
8594 +       ctx->cdata.keylen = keylen - 4;
8595 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8596 +                                  DMA_TO_DEVICE);
8597 +       return rfc4106_set_sh_desc(aead);
8598  }
8599  
8600  static int rfc4543_setkey(struct crypto_aead *aead,
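
Both rfc4106_setkey() above and rfc4543_setkey() below follow the convention restated in their comments: the caller-supplied key material carries a 4-byte nonce salt at its tail, so the AES key length stored in ctx->cdata.keylen is keylen - 4. A minimal worked example (the key bytes are made up):

    #include <stdio.h>

    int main(void)
    {
            /* 16-byte AES key followed by a 4-byte salt (values made up) */
            unsigned char key[20] = {
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                    0xde, 0xad, 0xbe, 0xef
            };
            unsigned int keylen = sizeof(key);
            unsigned int aes_keylen = keylen - 4;        /* ctx->cdata.keylen */
            const unsigned char *salt = key + aes_keylen;

            printf("AES keylen = %u, salt = %02x%02x%02x%02x\n",
                   aes_keylen, salt[0], salt[1], salt[2], salt[3]);
            return 0;
    }
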
8601 @@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
8602  {
8603         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8604         struct device *jrdev = ctx->jrdev;
8605 -       int ret = 0;
8606  
8607         if (keylen < 4)
8608                 return -EINVAL;
8609 @@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
8610          * The last four bytes of the key material are used as the salt value
8611          * in the nonce. Update the AES key length.
8612          */
8613 -       ctx->enckeylen = keylen - 4;
8614 -
8615 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8616 -                                     DMA_TO_DEVICE);
8617 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8618 -               dev_err(jrdev, "unable to map key i/o memory\n");
8619 -               return -ENOMEM;
8620 -       }
8621 -
8622 -       ret = rfc4543_set_sh_desc(aead);
8623 -       if (ret) {
8624 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8625 -                                DMA_TO_DEVICE);
8626 -       }
8627 -
8628 -       return ret;
8629 +       ctx->cdata.keylen = keylen - 4;
8630 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8631 +                                  DMA_TO_DEVICE);
8632 +       return rfc4543_set_sh_desc(aead);
8633  }
8634  
8635  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8636                              const u8 *key, unsigned int keylen)
8637  {
8638         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8639 -       struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8640         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8641         const char *alg_name = crypto_tfm_alg_name(tfm);
8642         struct device *jrdev = ctx->jrdev;
8643 -       int ret = 0;
8644 -       u32 *key_jump_cmd;
8645 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8646         u32 *desc;
8647 -       u8 *nonce;
8648 -       u32 geniv;
8649         u32 ctx1_iv_off = 0;
8650 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8651 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8652                                OP_ALG_AAI_CTR_MOD128);
8653         const bool is_rfc3686 = (ctr_mode &&
8654                                  (strstr(alg_name, "rfc3686") != NULL));
8655  
8656 +       memcpy(ctx->key, key, keylen);
8657  #ifdef DEBUG
8658         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8659                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8660 @@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
8661                 keylen -= CTR_RFC3686_NONCE_SIZE;
8662         }
8663  
8664 -       memcpy(ctx->key, key, keylen);
8665 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8666 -                                     DMA_TO_DEVICE);
8667 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8668 -               dev_err(jrdev, "unable to map key i/o memory\n");
8669 -               return -ENOMEM;
8670 -       }
8671 -       ctx->enckeylen = keylen;
8672 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8673 +       ctx->cdata.keylen = keylen;
8674 +       ctx->cdata.key_virt = ctx->key;
8675 +       ctx->cdata.key_inline = true;
8676  
8677         /* ablkcipher_encrypt shared descriptor */
8678         desc = ctx->sh_desc_enc;
8679 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8680 -       /* Skip if already shared */
8681 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8682 -                                  JUMP_COND_SHRD);
8683 -
8684 -       /* Load class1 key only */
8685 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8686 -                         ctx->enckeylen, CLASS_1 |
8687 -                         KEY_DEST_CLASS_REG);
8688 -
8689 -       /* Load nonce into CONTEXT1 reg */
8690 -       if (is_rfc3686) {
8691 -               nonce = (u8 *)key + keylen;
8692 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8693 -                                  LDST_CLASS_IND_CCB |
8694 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8695 -               append_move(desc, MOVE_WAITCOMP |
8696 -                           MOVE_SRC_OUTFIFO |
8697 -                           MOVE_DEST_CLASS1CTX |
8698 -                           (16 << MOVE_OFFSET_SHIFT) |
8699 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8700 -       }
8701 -
8702 -       set_jump_tgt_here(desc, key_jump_cmd);
8703 -
8704 -       /* Load iv */
8705 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8706 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8707 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8708 +                                    ctx1_iv_off);
8709 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8710 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8711  
8712 -       /* Load counter into CONTEXT1 reg */
8713 -       if (is_rfc3686)
8714 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8715 -                                    LDST_SRCDST_BYTE_CONTEXT |
8716 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8717 -                                     LDST_OFFSET_SHIFT));
8718 -
8719 -       /* Load operation */
8720 -       append_operation(desc, ctx->class1_alg_type |
8721 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8722 -
8723 -       /* Perform operation */
8724 -       ablkcipher_append_src_dst(desc);
8725 -
8726 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8727 -                                             desc_bytes(desc),
8728 -                                             DMA_TO_DEVICE);
8729 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8730 -               dev_err(jrdev, "unable to map shared descriptor\n");
8731 -               return -ENOMEM;
8732 -       }
8733 -#ifdef DEBUG
8734 -       print_hex_dump(KERN_ERR,
8735 -                      "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8736 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8737 -                      desc_bytes(desc), 1);
8738 -#endif
8739         /* ablkcipher_decrypt shared descriptor */
8740         desc = ctx->sh_desc_dec;
8741 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8742 +                                    ctx1_iv_off);
8743 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8744 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8745  
8746 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8747 -       /* Skip if already shared */
8748 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8749 -                                  JUMP_COND_SHRD);
8750 -
8751 -       /* Load class1 key only */
8752 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8753 -                         ctx->enckeylen, CLASS_1 |
8754 -                         KEY_DEST_CLASS_REG);
8755 -
8756 -       /* Load nonce into CONTEXT1 reg */
8757 -       if (is_rfc3686) {
8758 -               nonce = (u8 *)key + keylen;
8759 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8760 -                                  LDST_CLASS_IND_CCB |
8761 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8762 -               append_move(desc, MOVE_WAITCOMP |
8763 -                           MOVE_SRC_OUTFIFO |
8764 -                           MOVE_DEST_CLASS1CTX |
8765 -                           (16 << MOVE_OFFSET_SHIFT) |
8766 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8767 -       }
8768 -
8769 -       set_jump_tgt_here(desc, key_jump_cmd);
8770 -
8771 -       /* load IV */
8772 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8773 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8774 -
8775 -       /* Load counter into CONTEXT1 reg */
8776 -       if (is_rfc3686)
8777 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8778 -                                    LDST_SRCDST_BYTE_CONTEXT |
8779 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8780 -                                     LDST_OFFSET_SHIFT));
8781 -
8782 -       /* Choose operation */
8783 -       if (ctr_mode)
8784 -               append_operation(desc, ctx->class1_alg_type |
8785 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8786 -       else
8787 -               append_dec_op1(desc, ctx->class1_alg_type);
8788 -
8789 -       /* Perform operation */
8790 -       ablkcipher_append_src_dst(desc);
8791 -
8792 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8793 -                                             desc_bytes(desc),
8794 -                                             DMA_TO_DEVICE);
8795 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8796 -               dev_err(jrdev, "unable to map shared descriptor\n");
8797 -               return -ENOMEM;
8798 -       }
8799 -
8800 -#ifdef DEBUG
8801 -       print_hex_dump(KERN_ERR,
8802 -                      "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8803 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8804 -                      desc_bytes(desc), 1);
8805 -#endif
8806         /* ablkcipher_givencrypt shared descriptor */
8807         desc = ctx->sh_desc_givenc;
8808 +       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8809 +                                       ctx1_iv_off);
8810 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8811 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8812  
8813 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8814 -       /* Skip if already shared */
8815 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8816 -                                  JUMP_COND_SHRD);
8817 -
8818 -       /* Load class1 key only */
8819 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8820 -                         ctx->enckeylen, CLASS_1 |
8821 -                         KEY_DEST_CLASS_REG);
8822 -
8823 -       /* Load Nonce into CONTEXT1 reg */
8824 -       if (is_rfc3686) {
8825 -               nonce = (u8 *)key + keylen;
8826 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8827 -                                  LDST_CLASS_IND_CCB |
8828 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8829 -               append_move(desc, MOVE_WAITCOMP |
8830 -                           MOVE_SRC_OUTFIFO |
8831 -                           MOVE_DEST_CLASS1CTX |
8832 -                           (16 << MOVE_OFFSET_SHIFT) |
8833 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8834 -       }
8835 -       set_jump_tgt_here(desc, key_jump_cmd);
8836 -
8837 -       /* Generate IV */
8838 -       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8839 -               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8840 -               NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8841 -       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8842 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8843 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8844 -       append_move(desc, MOVE_WAITCOMP |
8845 -                   MOVE_SRC_INFIFO |
8846 -                   MOVE_DEST_CLASS1CTX |
8847 -                   (crt->ivsize << MOVE_LEN_SHIFT) |
8848 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8849 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8850 -
8851 -       /* Copy generated IV to memory */
8852 -       append_seq_store(desc, crt->ivsize,
8853 -                        LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8854 -                        (ctx1_iv_off << LDST_OFFSET_SHIFT));
8855 -
8856 -       /* Load Counter into CONTEXT1 reg */
8857 -       if (is_rfc3686)
8858 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8859 -                                    LDST_SRCDST_BYTE_CONTEXT |
8860 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8861 -                                     LDST_OFFSET_SHIFT));
8862 -
8863 -       if (ctx1_iv_off)
8864 -               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8865 -                           (1 << JUMP_OFFSET_SHIFT));
8866 -
8867 -       /* Load operation */
8868 -       append_operation(desc, ctx->class1_alg_type |
8869 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8870 -
8871 -       /* Perform operation */
8872 -       ablkcipher_append_src_dst(desc);
8873 -
8874 -       ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8875 -                                                desc_bytes(desc),
8876 -                                                DMA_TO_DEVICE);
8877 -       if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8878 -               dev_err(jrdev, "unable to map shared descriptor\n");
8879 -               return -ENOMEM;
8880 -       }
8881 -#ifdef DEBUG
8882 -       print_hex_dump(KERN_ERR,
8883 -                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8884 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8885 -                      desc_bytes(desc), 1);
8886 -#endif
8887 -
8888 -       return ret;
8889 +       return 0;
8890  }
8891  
8892  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
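
ablkcipher_setkey() above handles the related rfc3686 template the same way before building its three shared descriptors (encrypt, decrypt, givencrypt): the 4-byte CTR nonce is stripped off the tail of the key (keylen -= CTR_RFC3686_NONCE_SIZE) and passed to the constructors separately, together with a non-zero IV offset (ctx1_iv_off) into the CONTEXT1 register. A sketch of that key/nonce split (values invented for illustration):

    #include <stdio.h>

    #define CTR_RFC3686_NONCE_SIZE 4

    int main(void)
    {
            /* template key: AES key || 4-byte nonce (values made up) */
            unsigned char key[20] = { [16] = 0xca, 0xfe, 0xba, 0xbe };
            unsigned int keylen = sizeof(key);
            const unsigned char *nonce;

            keylen -= CTR_RFC3686_NONCE_SIZE;  /* ctx->cdata.keylen */
            nonce = key + keylen;              /* handed to the constructors */

            printf("AES keylen = %u, nonce = %02x%02x%02x%02x\n",
                   keylen, nonce[0], nonce[1], nonce[2], nonce[3]);
            return 0;
    }
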
8893 @@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
8894  {
8895         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8896         struct device *jrdev = ctx->jrdev;
8897 -       u32 *key_jump_cmd, *desc;
8898 -       __be64 sector_size = cpu_to_be64(512);
8899 +       u32 *desc;
8900  
8901         if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
8902                 crypto_ablkcipher_set_flags(ablkcipher,
8903 @@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
8904         }
8905  
8906         memcpy(ctx->key, key, keylen);
8907 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8908 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8909 -               dev_err(jrdev, "unable to map key i/o memory\n");
8910 -               return -ENOMEM;
8911 -       }
8912 -       ctx->enckeylen = keylen;
8913 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8914 +       ctx->cdata.keylen = keylen;
8915 +       ctx->cdata.key_virt = ctx->key;
8916 +       ctx->cdata.key_inline = true;
8917  
8918         /* xts_ablkcipher_encrypt shared descriptor */
8919         desc = ctx->sh_desc_enc;
8920 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8921 -       /* Skip if already shared */
8922 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8923 -                                  JUMP_COND_SHRD);
8924 -
8925 -       /* Load class1 keys only */
8926 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8927 -                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8928 -
8929 -       /* Load sector size with index 40 bytes (0x28) */
8930 -       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8931 -                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8932 -       append_data(desc, (void *)&sector_size, 8);
8933 -
8934 -       set_jump_tgt_here(desc, key_jump_cmd);
8935 -
8936 -       /*
8937 -        * create sequence for loading the sector index
8938 -        * Upper 8B of IV - will be used as sector index
8939 -        * Lower 8B of IV - will be discarded
8940 -        */
8941 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8942 -                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8943 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8944 -
8945 -       /* Load operation */
8946 -       append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
8947 -                        OP_ALG_ENCRYPT);
8948 -
8949 -       /* Perform operation */
8950 -       ablkcipher_append_src_dst(desc);
8951 -
8952 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
8953 -                                             DMA_TO_DEVICE);
8954 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8955 -               dev_err(jrdev, "unable to map shared descriptor\n");
8956 -               return -ENOMEM;
8957 -       }
8958 -#ifdef DEBUG
8959 -       print_hex_dump(KERN_ERR,
8960 -                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
8961 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8962 -#endif
8963 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
8964 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8965 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8966  
8967         /* xts_ablkcipher_decrypt shared descriptor */
8968         desc = ctx->sh_desc_dec;
8969 -
8970 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8971 -       /* Skip if already shared */
8972 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8973 -                                  JUMP_COND_SHRD);
8974 -
8975 -       /* Load class1 key only */
8976 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8977 -                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8978 -
8979 -       /* Load sector size with index 40 bytes (0x28) */
8980 -       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8981 -                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8982 -       append_data(desc, (void *)&sector_size, 8);
8983 -
8984 -       set_jump_tgt_here(desc, key_jump_cmd);
8985 -
8986 -       /*
8987 -        * create sequence for loading the sector index
8988 -        * Upper 8B of IV - will be used as sector index
8989 -        * Lower 8B of IV - will be discarded
8990 -        */
8991 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8992 -                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8993 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8994 -
8995 -       /* Load operation */
8996 -       append_dec_op1(desc, ctx->class1_alg_type);
8997 -
8998 -       /* Perform operation */
8999 -       ablkcipher_append_src_dst(desc);
9000 -
9001 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9002 -                                             DMA_TO_DEVICE);
9003 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9004 -               dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9005 -                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9006 -               dev_err(jrdev, "unable to map shared descriptor\n");
9007 -               return -ENOMEM;
9008 -       }
9009 -#ifdef DEBUG
9010 -       print_hex_dump(KERN_ERR,
9011 -                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9012 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9013 -#endif
9014 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9015 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9016 +                                  desc_bytes(desc), DMA_TO_DEVICE);
9017  
9018         return 0;
9019  }
9020  
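The setkey paths above illustrate the core refactor: shared-descriptor programs are no longer assembled and DMA-mapped inside every setkey call. The key and both descriptor buffers are mapped once up front (at tfm init time, judging by the dma_sync_single_for_device() calls on pre-existing ctx->*_dma handles, an assumption since the init hunk is elsewhere in this patch), and setkey merely refreshes their contents before delegating construction to the cnstr_shdsc_*() helpers this patch adds in caamalg_desc.c. A condensed sketch of the resulting shape, with key-length validation elided:

	static int xts_setkey_sketch(struct crypto_ablkcipher *ablkcipher,
				     const u8 *key, unsigned int keylen)
	{
		struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
		struct device *jrdev = ctx->jrdev;

		/* buffers were DMA-mapped once at init; only resync here */
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma, keylen,
					   DMA_TO_DEVICE);

		ctx->cdata.keylen = keylen;
		ctx->cdata.key_virt = ctx->key;
		ctx->cdata.key_inline = true;

		/* descriptor construction now lives in caamalg_desc.c */
		cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
		dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
					   desc_bytes(ctx->sh_desc_enc),
					   DMA_TO_DEVICE);
		return 0;
	}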
9021  /*
9022   * aead_edesc - s/w-extended aead descriptor
9023 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9024 - * @src_nents: number of segments in input scatterlist
9025 - * @dst_nents: number of segments in output scatterlist
9026 - * @iv_dma: dma address of iv for checking continuity and link table
9027 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9028 + * @src_nents: number of segments in input s/w scatterlist
9029 + * @dst_nents: number of segments in output s/w scatterlist
9030   * @sec4_sg_bytes: length of dma mapped sec4_sg space
9031   * @sec4_sg_dma: bus physical mapped address of h/w link table
9032 + * @sec4_sg: pointer to h/w link table
9033   * @hw_desc: the h/w job descriptor followed by any referenced link tables
9034   */
9035  struct aead_edesc {
9036 -       int assoc_nents;
9037         int src_nents;
9038         int dst_nents;
9039 -       dma_addr_t iv_dma;
9040         int sec4_sg_bytes;
9041         dma_addr_t sec4_sg_dma;
9042         struct sec4_sg_entry *sec4_sg;
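Both extended descriptors are carved out of a single kzalloc() so that the hardware job descriptor and the sec4 link table trail the software struct; the sec4_sg pointer is plain arithmetic over that one allocation. The layout used later by the allocators in this patch, spelled out:

	/* [struct aead_edesc][hw desc: desc_bytes][sec4_sg: sec4_sg_bytes] */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes;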
9043 @@ -1899,12 +739,12 @@ struct aead_edesc {
9044  
9045  /*
9046   * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9047 - * @src_nents: number of segments in input scatterlist
9048 - * @dst_nents: number of segments in output scatterlist
9049 + * @src_nents: number of segments in input s/w scatterlist
9050 + * @dst_nents: number of segments in output s/w scatterlist
9051   * @iv_dma: dma address of iv for checking continuity and link table
9052 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9053   * @sec4_sg_bytes: length of dma mapped sec4_sg space
9054   * @sec4_sg_dma: bus physical mapped address of h/w link table
9055 + * @sec4_sg: pointer to h/w link table
9056   * @hw_desc: the h/w job descriptor followed by any referenced link tables
9057   */
9058  struct ablkcipher_edesc {
9059 @@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
9060                        int sec4_sg_bytes)
9061  {
9062         if (dst != src) {
9063 -               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9064 -               dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9065 +               if (src_nents)
9066 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9067 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9068         } else {
9069 -               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9070 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9071         }
9072  
9073         if (iv_dma)
9074 @@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
9075         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9076  #endif
9077  
9078 -       edesc = (struct ablkcipher_edesc *)((char *)desc -
9079 -                offsetof(struct ablkcipher_edesc, hw_desc));
9080 +       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9081  
9082         if (err)
9083                 caam_jr_strstatus(jrdev, err);
9084 @@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
9085         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
9086                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9087                        edesc->src_nents > 1 ? 100 : ivsize, 1);
9088 -       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
9089 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9090 -                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9091  #endif
9092 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
9093 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9094 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9095  
9096         ablkcipher_unmap(jrdev, edesc, req);
9097  
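Both completion callbacks recover the enclosing edesc with container_of() instead of open-coded offsetof() arithmetic. The two forms compute the same address, but container_of() documents the intent and type-checks the member (hw_desc is the flexible array at the end of the edesc):

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	/* equivalent to the removed open-coded form: */
	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));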
9098 @@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
9099         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9100  #endif
9101  
9102 -       edesc = (struct ablkcipher_edesc *)((char *)desc -
9103 -                offsetof(struct ablkcipher_edesc, hw_desc));
9104 +       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9105         if (err)
9106                 caam_jr_strstatus(jrdev, err);
9107  
9108 @@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
9109         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
9110                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9111                        ivsize, 1);
9112 -       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
9113 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9114 -                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9115  #endif
9116 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
9117 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9118 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9119  
9120         ablkcipher_unmap(jrdev, edesc, req);
9121  
9122 @@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
9123         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9124  
9125         if (all_contig) {
9126 -               src_dma = sg_dma_address(req->src);
9127 +               src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9128                 in_options = 0;
9129         } else {
9130                 src_dma = edesc->sec4_sg_dma;
9131 @@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
9132         out_options = in_options;
9133  
9134         if (unlikely(req->src != req->dst)) {
9135 -               if (!edesc->dst_nents) {
9136 +               if (edesc->dst_nents == 1) {
9137                         dst_dma = sg_dma_address(req->dst);
9138                 } else {
9139                         dst_dma = edesc->sec4_sg_dma +
9140 @@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
9141                          FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9142         /* Append Salt */
9143         if (!generic_gcm)
9144 -               append_data(desc, ctx->key + ctx->enckeylen, 4);
9145 +               append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9146         /* Append IV */
9147         append_data(desc, req->iv, ivsize);
9148         /* End of blank commands */
9149 @@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
9150                                                  struct caam_aead_alg, aead);
9151         unsigned int ivsize = crypto_aead_ivsize(aead);
9152         struct caam_ctx *ctx = crypto_aead_ctx(aead);
9153 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9154 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9155                                OP_ALG_AAI_CTR_MOD128);
9156         const bool is_rfc3686 = alg->caam.rfc3686;
9157         u32 *desc = edesc->hw_desc;
9158 @@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
9159         int len, sec4_sg_index = 0;
9160  
9161  #ifdef DEBUG
9162 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9163 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9164         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9165                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9166                        ivsize, 1);
9167 -       printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9168 -       dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
9169 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9170 -                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9171 +       pr_err("asked=%d, nbytes=%d\n",
9172 +              (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9173  #endif
9174 +       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
9175 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9176 +                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9177  
9178         len = desc_len(sh_desc);
9179         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9180 @@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
9181         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9182  
9183         if (likely(req->src == req->dst)) {
9184 -               if (!edesc->src_nents && iv_contig) {
9185 +               if (edesc->src_nents == 1 && iv_contig) {
9186                         dst_dma = sg_dma_address(req->src);
9187                 } else {
9188                         dst_dma = edesc->sec4_sg_dma +
9189 @@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
9190                         out_options = LDST_SGF;
9191                 }
9192         } else {
9193 -               if (!edesc->dst_nents) {
9194 +               if (edesc->dst_nents == 1) {
9195                         dst_dma = sg_dma_address(req->dst);
9196                 } else {
9197                         dst_dma = edesc->sec4_sg_dma +
9198 @@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
9199         int len, sec4_sg_index = 0;
9200  
9201  #ifdef DEBUG
9202 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9203 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9204         print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9205                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9206                        ivsize, 1);
9207 -       dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
9208 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9209 -                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9210  #endif
9211 +       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
9212 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9213 +                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9214  
9215         len = desc_len(sh_desc);
9216         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9217  
9218 -       if (!edesc->src_nents) {
9219 +       if (edesc->src_nents == 1) {
9220                 src_dma = sg_dma_address(req->src);
9221                 in_options = 0;
9222         } else {
9223 @@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
9224         struct crypto_aead *aead = crypto_aead_reqtfm(req);
9225         struct caam_ctx *ctx = crypto_aead_ctx(aead);
9226         struct device *jrdev = ctx->jrdev;
9227 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9228 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9229 -       int src_nents, dst_nents = 0;
9230 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9231 +                      GFP_KERNEL : GFP_ATOMIC;
9232 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9233         struct aead_edesc *edesc;
9234 -       int sgc;
9235 -       bool all_contig = true;
9236 -       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9237 +       int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9238         unsigned int authsize = ctx->authsize;
9239  
9240         if (unlikely(req->dst != req->src)) {
9241 -               src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9242 -               dst_nents = sg_count(req->dst,
9243 -                                    req->assoclen + req->cryptlen +
9244 -                                       (encrypt ? authsize : (-authsize)));
9245 -       } else {
9246 -               src_nents = sg_count(req->src,
9247 -                                    req->assoclen + req->cryptlen +
9248 -                                       (encrypt ? authsize : 0));
9249 -       }
9250 -
9251 -       /* Check if data are contiguous. */
9252 -       all_contig = !src_nents;
9253 -       if (!all_contig) {
9254 -               src_nents = src_nents ? : 1;
9255 -               sec4_sg_len = src_nents;
9256 -       }
9257 -
9258 -       sec4_sg_len += dst_nents;
9259 -
9260 -       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9261 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
9262 +                                            req->cryptlen);
9263 +               if (unlikely(src_nents < 0)) {
9264 +                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9265 +                               req->assoclen + req->cryptlen);
9266 +                       return ERR_PTR(src_nents);
9267 +               }
9268  
9269 -       /* allocate space for base edesc and hw desc commands, link tables */
9270 -       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9271 -                       GFP_DMA | flags);
9272 -       if (!edesc) {
9273 -               dev_err(jrdev, "could not allocate extended descriptor\n");
9274 -               return ERR_PTR(-ENOMEM);
9275 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9276 +                                            req->cryptlen +
9277 +                                               (encrypt ? authsize :
9278 +                                                          (-authsize)));
9279 +               if (unlikely(dst_nents < 0)) {
9280 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9281 +                               req->assoclen + req->cryptlen +
9282 +                               (encrypt ? authsize : (-authsize)));
9283 +                       return ERR_PTR(dst_nents);
9284 +               }
9285 +       } else {
9286 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
9287 +                                            req->cryptlen +
9288 +                                            (encrypt ? authsize : 0));
9289 +               if (unlikely(src_nents < 0)) {
9290 +                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9291 +                               req->assoclen + req->cryptlen +
9292 +                               (encrypt ? authsize : 0));
9293 +                       return ERR_PTR(src_nents);
9294 +               }
9295         }
9296  
9297         if (likely(req->src == req->dst)) {
9298 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9299 -                                DMA_BIDIRECTIONAL);
9300 -               if (unlikely(!sgc)) {
9301 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9302 +                                             DMA_BIDIRECTIONAL);
9303 +               if (unlikely(!mapped_src_nents)) {
9304                         dev_err(jrdev, "unable to map source\n");
9305 -                       kfree(edesc);
9306                         return ERR_PTR(-ENOMEM);
9307                 }
9308         } else {
9309 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9310 -                                DMA_TO_DEVICE);
9311 -               if (unlikely(!sgc)) {
9312 -                       dev_err(jrdev, "unable to map source\n");
9313 -                       kfree(edesc);
9314 -                       return ERR_PTR(-ENOMEM);
9315 +               /* Also cover the case of null (zero-length) input data */
9316 +               if (src_nents) {
9317 +                       mapped_src_nents = dma_map_sg(jrdev, req->src,
9318 +                                                     src_nents, DMA_TO_DEVICE);
9319 +                       if (unlikely(!mapped_src_nents)) {
9320 +                               dev_err(jrdev, "unable to map source\n");
9321 +                               return ERR_PTR(-ENOMEM);
9322 +                       }
9323 +               } else {
9324 +                       mapped_src_nents = 0;
9325                 }
9326  
9327 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9328 -                                DMA_FROM_DEVICE);
9329 -               if (unlikely(!sgc)) {
9330 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9331 +                                             DMA_FROM_DEVICE);
9332 +               if (unlikely(!mapped_dst_nents)) {
9333                         dev_err(jrdev, "unable to map destination\n");
9334 -                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9335 -                                    DMA_TO_DEVICE);
9336 -                       kfree(edesc);
9337 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9338                         return ERR_PTR(-ENOMEM);
9339                 }
9340         }
9341  
9342 +       sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9343 +       sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9344 +       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9345 +
9346 +       /* allocate space for base edesc and hw desc commands, link tables */
9347 +       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9348 +                       GFP_DMA | flags);
9349 +       if (!edesc) {
9350 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9351 +                          0, 0, 0);
9352 +               return ERR_PTR(-ENOMEM);
9353 +       }
9354 +
9355         edesc->src_nents = src_nents;
9356         edesc->dst_nents = dst_nents;
9357         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9358                          desc_bytes;
9359 -       *all_contig_ptr = all_contig;
9360 +       *all_contig_ptr = !(mapped_src_nents > 1);
9361  
9362         sec4_sg_index = 0;
9363 -       if (!all_contig) {
9364 -               sg_to_sec4_sg_last(req->src, src_nents,
9365 -                             edesc->sec4_sg + sec4_sg_index, 0);
9366 -               sec4_sg_index += src_nents;
9367 +       if (mapped_src_nents > 1) {
9368 +               sg_to_sec4_sg_last(req->src, mapped_src_nents,
9369 +                                  edesc->sec4_sg + sec4_sg_index, 0);
9370 +               sec4_sg_index += mapped_src_nents;
9371         }
9372 -       if (dst_nents) {
9373 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9374 +       if (mapped_dst_nents > 1) {
9375 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9376                                    edesc->sec4_sg + sec4_sg_index, 0);
9377         }
9378  
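The allocator rework above replaces the driver-local sg_count() (whose return of 0 meant "single contiguous segment") with sg_nents_for_len(), which counts software segments and returns a negative errno when the scatterlist is shorter than requested. Because dma_map_sg() may coalesce entries, the link table is now sized from the mapped counts, and no table entries are emitted for a single mapped segment. The skeleton of that pattern, with total_len standing in for the request-specific length and the remaining error paths trimmed:

	src_nents = sg_nents_for_len(req->src, total_len);
	if (unlikely(src_nents < 0))
		return ERR_PTR(src_nents);	/* S/G list too short */

	mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents))
		return ERR_PTR(-ENOMEM);

	/* link-table entries only when the mapped data is non-contiguous */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;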
9379 @@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
9380         u32 *desc;
9381         int ret = 0;
9382  
9383 -#ifdef DEBUG
9384 -       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9385 -                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9386 -       dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9387 -                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9388 -                   req->assoclen + req->cryptlen, 1, may_sleep);
9389 -#endif
9390 +       caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9391 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9392 +                    req->assoclen + req->cryptlen, 1);
9393  
9394         /* allocate extended descriptor */
9395         edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9396 @@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
9397         struct device *jrdev = ctx->jrdev;
9398         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9399                        GFP_KERNEL : GFP_ATOMIC;
9400 -       int src_nents, dst_nents = 0, sec4_sg_bytes;
9401 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9402         struct ablkcipher_edesc *edesc;
9403         dma_addr_t iv_dma = 0;
9404 -       bool iv_contig = false;
9405 -       int sgc;
9406 +       bool in_contig;
9407         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9408 -       int sec4_sg_index;
9409 +       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9410  
9411 -       src_nents = sg_count(req->src, req->nbytes);
9412 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9413 +       if (unlikely(src_nents < 0)) {
9414 +               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9415 +                       req->nbytes);
9416 +               return ERR_PTR(src_nents);
9417 +       }
9418  
9419 -       if (req->dst != req->src)
9420 -               dst_nents = sg_count(req->dst, req->nbytes);
9421 +       if (req->dst != req->src) {
9422 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9423 +               if (unlikely(dst_nents < 0)) {
9424 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9425 +                               req->nbytes);
9426 +                       return ERR_PTR(dst_nents);
9427 +               }
9428 +       }
9429  
9430         if (likely(req->src == req->dst)) {
9431 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9432 -                                DMA_BIDIRECTIONAL);
9433 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9434 +                                             DMA_BIDIRECTIONAL);
9435 +               if (unlikely(!mapped_src_nents)) {
9436 +                       dev_err(jrdev, "unable to map source\n");
9437 +                       return ERR_PTR(-ENOMEM);
9438 +               }
9439         } else {
9440 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9441 -                                DMA_TO_DEVICE);
9442 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9443 -                                DMA_FROM_DEVICE);
9444 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9445 +                                             DMA_TO_DEVICE);
9446 +               if (unlikely(!mapped_src_nents)) {
9447 +                       dev_err(jrdev, "unable to map source\n");
9448 +                       return ERR_PTR(-ENOMEM);
9449 +               }
9450 +
9451 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9452 +                                             DMA_FROM_DEVICE);
9453 +               if (unlikely(!mapped_dst_nents)) {
9454 +                       dev_err(jrdev, "unable to map destination\n");
9455 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9456 +                       return ERR_PTR(-ENOMEM);
9457 +               }
9458         }
9459  
9460         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9461         if (dma_mapping_error(jrdev, iv_dma)) {
9462                 dev_err(jrdev, "unable to map IV\n");
9463 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9464 +                          0, 0, 0);
9465                 return ERR_PTR(-ENOMEM);
9466         }
9467  
9468 -       /*
9469 -        * Check if iv can be contiguous with source and destination.
9470 -        * If so, include it. If not, create scatterlist.
9471 -        */
9472 -       if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9473 -               iv_contig = true;
9474 -       else
9475 -               src_nents = src_nents ? : 1;
9476 -       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9477 -                       sizeof(struct sec4_sg_entry);
9478 +       if (mapped_src_nents == 1 &&
9479 +           iv_dma + ivsize == sg_dma_address(req->src)) {
9480 +               in_contig = true;
9481 +               sec4_sg_ents = 0;
9482 +       } else {
9483 +               in_contig = false;
9484 +               sec4_sg_ents = 1 + mapped_src_nents;
9485 +       }
9486 +       dst_sg_idx = sec4_sg_ents;
9487 +       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9488 +       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9489  
9490         /* allocate space for base edesc and hw desc commands, link tables */
9491         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9492                         GFP_DMA | flags);
9493         if (!edesc) {
9494                 dev_err(jrdev, "could not allocate extended descriptor\n");
9495 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9496 +                          iv_dma, ivsize, 0, 0);
9497                 return ERR_PTR(-ENOMEM);
9498         }
9499  
9500 @@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
9501         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9502                          desc_bytes;
9503  
9504 -       sec4_sg_index = 0;
9505 -       if (!iv_contig) {
9506 +       if (!in_contig) {
9507                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9508 -               sg_to_sec4_sg_last(req->src, src_nents,
9509 +               sg_to_sec4_sg_last(req->src, mapped_src_nents,
9510                                    edesc->sec4_sg + 1, 0);
9511 -               sec4_sg_index += 1 + src_nents;
9512         }
9513  
9514 -       if (dst_nents) {
9515 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9516 -                       edesc->sec4_sg + sec4_sg_index, 0);
9517 +       if (mapped_dst_nents > 1) {
9518 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9519 +                                  edesc->sec4_sg + dst_sg_idx, 0);
9520         }
9521  
9522         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9523                                             sec4_sg_bytes, DMA_TO_DEVICE);
9524         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9525                 dev_err(jrdev, "unable to map S/G table\n");
9526 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9527 +                          iv_dma, ivsize, 0, 0);
9528 +               kfree(edesc);
9529                 return ERR_PTR(-ENOMEM);
9530         }
9531  
9532 @@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
9533                        sec4_sg_bytes, 1);
9534  #endif
9535  
9536 -       *iv_contig_out = iv_contig;
9537 +       *iv_contig_out = in_contig;
9538         return edesc;
9539  }
9540  
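ablkcipher_edesc_alloc() above keeps one micro-optimization worth calling out: the IV is DMA-mapped on its own, and when it happens to land immediately before a single mapped data segment in bus-address space, the job can reference IV plus payload as one contiguous range and skip the input link table altogether. The decisive check, extracted from the function:

	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;	/* IV and payload back to back */
		sec4_sg_ents = 0;	/* no input link table needed */
	} else {
		in_contig = false;
		sec4_sg_ents = 1 + mapped_src_nents;	/* [IV][src...] */
	}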
9541 @@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
9542         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9543         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9544         struct device *jrdev = ctx->jrdev;
9545 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9546 -                                         CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9547 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9548                        GFP_KERNEL : GFP_ATOMIC;
9549 -       int src_nents, dst_nents = 0, sec4_sg_bytes;
9550 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9551         struct ablkcipher_edesc *edesc;
9552         dma_addr_t iv_dma = 0;
9553 -       bool iv_contig = false;
9554 -       int sgc;
9555 +       bool out_contig;
9556         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9557 -       int sec4_sg_index;
9558 -
9559 -       src_nents = sg_count(req->src, req->nbytes);
9560 +       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9561  
9562 -       if (unlikely(req->dst != req->src))
9563 -               dst_nents = sg_count(req->dst, req->nbytes);
9564 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
9565 +       if (unlikely(src_nents < 0)) {
9566 +               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9567 +                       req->nbytes);
9568 +               return ERR_PTR(src_nents);
9569 +       }
9570  
9571         if (likely(req->src == req->dst)) {
9572 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9573 -                                DMA_BIDIRECTIONAL);
9574 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9575 +                                             DMA_BIDIRECTIONAL);
9576 +               if (unlikely(!mapped_src_nents)) {
9577 +                       dev_err(jrdev, "unable to map source\n");
9578 +                       return ERR_PTR(-ENOMEM);
9579 +               }
9580 +
9581 +               dst_nents = src_nents;
9582 +               mapped_dst_nents = src_nents;
9583         } else {
9584 -               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9585 -                                DMA_TO_DEVICE);
9586 -               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9587 -                                DMA_FROM_DEVICE);
9588 +               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9589 +                                             DMA_TO_DEVICE);
9590 +               if (unlikely(!mapped_src_nents)) {
9591 +                       dev_err(jrdev, "unable to map source\n");
9592 +                       return ERR_PTR(-ENOMEM);
9593 +               }
9594 +
9595 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9596 +               if (unlikely(dst_nents < 0)) {
9597 +                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9598 +                               req->nbytes);
9599 +                       return ERR_PTR(dst_nents);
9600 +               }
9601 +
9602 +               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9603 +                                             DMA_FROM_DEVICE);
9604 +               if (unlikely(!mapped_dst_nents)) {
9605 +                       dev_err(jrdev, "unable to map destination\n");
9606 +                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9607 +                       return ERR_PTR(-ENOMEM);
9608 +               }
9609         }
9610  
9611         /*
9612 @@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
9613         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9614         if (dma_mapping_error(jrdev, iv_dma)) {
9615                 dev_err(jrdev, "unable to map IV\n");
9616 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9617 +                          0, 0, 0);
9618                 return ERR_PTR(-ENOMEM);
9619         }
9620  
9621 -       if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9622 -               iv_contig = true;
9623 -       else
9624 -               dst_nents = dst_nents ? : 1;
9625 -       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9626 -                       sizeof(struct sec4_sg_entry);
9627 +       sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9628 +       dst_sg_idx = sec4_sg_ents;
9629 +       if (mapped_dst_nents == 1 &&
9630 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
9631 +               out_contig = true;
9632 +       } else {
9633 +               out_contig = false;
9634 +               sec4_sg_ents += 1 + mapped_dst_nents;
9635 +       }
9636  
9637         /* allocate space for base edesc and hw desc commands, link tables */
9638 +       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9639         edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9640                         GFP_DMA | flags);
9641         if (!edesc) {
9642                 dev_err(jrdev, "could not allocate extended descriptor\n");
9643 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9644 +                          iv_dma, ivsize, 0, 0);
9645                 return ERR_PTR(-ENOMEM);
9646         }
9647  
9648 @@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
9649         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9650                          desc_bytes;
9651  
9652 -       sec4_sg_index = 0;
9653 -       if (src_nents) {
9654 -               sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9655 -               sec4_sg_index += src_nents;
9656 -       }
9657 +       if (mapped_src_nents > 1)
9658 +               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9659 +                                  0);
9660  
9661 -       if (!iv_contig) {
9662 -               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9663 +       if (!out_contig) {
9664 +               dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9665                                    iv_dma, ivsize, 0);
9666 -               sec4_sg_index += 1;
9667 -               sg_to_sec4_sg_last(req->dst, dst_nents,
9668 -                                  edesc->sec4_sg + sec4_sg_index, 0);
9669 +               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9670 +                                  edesc->sec4_sg + dst_sg_idx + 1, 0);
9671         }
9672  
9673         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9674                                             sec4_sg_bytes, DMA_TO_DEVICE);
9675         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9676                 dev_err(jrdev, "unable to map S/G table\n");
9677 +               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9678 +                          iv_dma, ivsize, 0, 0);
9679 +               kfree(edesc);
9680                 return ERR_PTR(-ENOMEM);
9681         }
9682         edesc->iv_dma = iv_dma;
9683 @@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
9684                        sec4_sg_bytes, 1);
9685  #endif
9686  
9687 -       *iv_contig_out = iv_contig;
9688 +       *iv_contig_out = out_contig;
9689         return edesc;
9690  }
9691  
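The givencrypt variant mirrors the contiguity trick on the output side (out_contig, IV preceding req->dst), and both allocators now observe a strict unwinding rule: once any mapping has succeeded, every later failure releases all prior mappings through caam_unmap() (passing 0 for resources not yet mapped) and frees the edesc before returning ERR_PTR(-ENOMEM), which is what keeps the error paths leak-free under the DMA API. The final error path above, as a reference shape:

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		/* undo src/dst/IV mappings, then drop descriptor memory */
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}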
9692 @@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
9693         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9694         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9695         struct device *jrdev = ctx->jrdev;
9696 -       bool iv_contig;
9697 +       bool iv_contig = false;
9698         u32 *desc;
9699         int ret = 0;
9700  
9701 @@ -2933,7 +1840,6 @@ struct caam_alg_template {
9702         } template_u;
9703         u32 class1_alg_type;
9704         u32 class2_alg_type;
9705 -       u32 alg_op;
9706  };
9707  
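From here on the template tables shed their .alg_op field (formerly OP_ALG_ALGSEL_* | OP_ALG_AAI_HMAC, consumed by split-key generation); with descriptor construction centralized, the shared helpers presumably derive the MDHA operation from class2_alg_type, so the long run of removals below is mechanical. A surviving entry keeps only fields of this shape (illustrative combination; values as they appear elsewhere in this patch):

	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,	/* driver-generated-IV flavor */
	},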
9708  static struct caam_alg_template driver_algs[] = {
9709 @@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
9710                 .caam = {
9711                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9712                                            OP_ALG_AAI_HMAC_PRECOMP,
9713 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9714                 },
9715         },
9716         {
9717 @@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
9718                 .caam = {
9719                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9720                                            OP_ALG_AAI_HMAC_PRECOMP,
9721 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9722                 },
9723         },
9724         {
9725 @@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
9726                 .caam = {
9727                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9728                                            OP_ALG_AAI_HMAC_PRECOMP,
9729 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9730                 },
9731         },
9732         {
9733 @@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
9734                 .caam = {
9735                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9736                                            OP_ALG_AAI_HMAC_PRECOMP,
9737 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9738                 },
9739         },
9740         {
9741 @@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
9742                 .caam = {
9743                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9744                                            OP_ALG_AAI_HMAC_PRECOMP,
9745 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9746                 },
9747         },
9748         {
9749 @@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
9750                 .caam = {
9751                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9752                                            OP_ALG_AAI_HMAC_PRECOMP,
9753 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9754                 },
9755         },
9756         {
9757 @@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
9758                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9759                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9760                                            OP_ALG_AAI_HMAC_PRECOMP,
9761 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9762                 },
9763         },
9764         {
9765 @@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
9766                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9767                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9768                                            OP_ALG_AAI_HMAC_PRECOMP,
9769 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9770                         .geniv = true,
9771                 },
9772         },
9773 @@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
9774                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9775                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9776                                            OP_ALG_AAI_HMAC_PRECOMP,
9777 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9778                 },
9779         },
9780         {
9781 @@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
9782                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9783                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9784                                            OP_ALG_AAI_HMAC_PRECOMP,
9785 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9786                         .geniv = true,
9787                 },
9788         },
9789 @@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
9790                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9791                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9792                                            OP_ALG_AAI_HMAC_PRECOMP,
9793 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9794                 },
9795         },
9796         {
9797 @@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
9798                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9799                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9800                                            OP_ALG_AAI_HMAC_PRECOMP,
9801 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9802                         .geniv = true,
9803                 },
9804         },
9805 @@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
9806                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9807                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9808                                            OP_ALG_AAI_HMAC_PRECOMP,
9809 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9810                 },
9811         },
9812         {
9813 @@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
9814                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9815                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9816                                            OP_ALG_AAI_HMAC_PRECOMP,
9817 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9818                         .geniv = true,
9819                 },
9820         },
9821 @@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
9822                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9823                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9824                                            OP_ALG_AAI_HMAC_PRECOMP,
9825 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9826                 },
9827         },
9828         {
9829 @@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
9830                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9831                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9832                                            OP_ALG_AAI_HMAC_PRECOMP,
9833 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9834                         .geniv = true,
9835                 },
9836         },
9837 @@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
9838                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9839                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9840                                            OP_ALG_AAI_HMAC_PRECOMP,
9841 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9842                 },
9843         },
9844         {
9845 @@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
9846                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9847                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9848                                            OP_ALG_AAI_HMAC_PRECOMP,
9849 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9850                         .geniv = true,
9851                 },
9852         },
9853 @@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
9854                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9855                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9856                                            OP_ALG_AAI_HMAC_PRECOMP,
9857 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9858                 }
9859         },
9860         {
9861 @@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
9862                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9863                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9864                                            OP_ALG_AAI_HMAC_PRECOMP,
9865 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9866                         .geniv = true,
9867                 }
9868         },
9869 @@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
9870                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9871                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9872                                            OP_ALG_AAI_HMAC_PRECOMP,
9873 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9874                 },
9875         },
9876         {
9877 @@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
9878                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9879                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9880                                            OP_ALG_AAI_HMAC_PRECOMP,
9881 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9882                         .geniv = true,
9883                 },
9884         },
9885 @@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
9886                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9887                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9888                                            OP_ALG_AAI_HMAC_PRECOMP,
9889 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9890                 },
9891         },
9892         {
9893 @@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
9894                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9895                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9896                                            OP_ALG_AAI_HMAC_PRECOMP,
9897 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9898                         .geniv = true,
9899                 },
9900         },
9901 @@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
9902                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9903                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9904                                            OP_ALG_AAI_HMAC_PRECOMP,
9905 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9906                 },
9907         },
9908         {
9909 @@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
9910                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9911                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9912                                            OP_ALG_AAI_HMAC_PRECOMP,
9913 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9914                         .geniv = true,
9915                 },
9916         },
9917 @@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
9918                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9919                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9920                                            OP_ALG_AAI_HMAC_PRECOMP,
9921 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9922                 },
9923         },
9924         {
9925 @@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
9926                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9927                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9928                                            OP_ALG_AAI_HMAC_PRECOMP,
9929 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9930                         .geniv = true,
9931                 },
9932         },
9933 @@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
9934                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9935                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9936                                            OP_ALG_AAI_HMAC_PRECOMP,
9937 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9938                 },
9939         },
9940         {
9941 @@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
9942                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9943                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9944                                            OP_ALG_AAI_HMAC_PRECOMP,
9945 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9946                         .geniv = true,
9947                 },
9948         },
9949 @@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
9950                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9951                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9952                                            OP_ALG_AAI_HMAC_PRECOMP,
9953 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9954                 },
9955         },
9956         {
9957 @@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
9958                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9959                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9960                                            OP_ALG_AAI_HMAC_PRECOMP,
9961 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9962                         .geniv = true,
9963                 },
9964         },
9965 @@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
9966                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9967                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9968                                            OP_ALG_AAI_HMAC_PRECOMP,
9969 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9970                 },
9971         },
9972         {
9973 @@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
9974                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9975                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9976                                            OP_ALG_AAI_HMAC_PRECOMP,
9977 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9978                         .geniv = true,
9979                 },
9980         },
9981 @@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
9982                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9983                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9984                                            OP_ALG_AAI_HMAC_PRECOMP,
9985 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9986                 },
9987         },
9988         {
9989 @@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
9990                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9991                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9992                                            OP_ALG_AAI_HMAC_PRECOMP,
9993 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9994                         .geniv = true,
9995                 },
9996         },
9997 @@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
9998                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9999                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10000                                            OP_ALG_AAI_HMAC_PRECOMP,
10001 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10002                 },
10003         },
10004         {
10005 @@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
10006                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10007                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10008                                            OP_ALG_AAI_HMAC_PRECOMP,
10009 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10010                         .geniv = true,
10011                 },
10012         },
10013 @@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
10014                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10015                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10016                                            OP_ALG_AAI_HMAC_PRECOMP,
10017 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10018                 },
10019         },
10020         {
10021 @@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
10022                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10023                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10024                                            OP_ALG_AAI_HMAC_PRECOMP,
10025 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10026                         .geniv = true,
10027                 },
10028         },
10029 @@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
10030                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10031                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10032                                            OP_ALG_AAI_HMAC_PRECOMP,
10033 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10034                 },
10035         },
10036         {
10037 @@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
10038                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10039                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10040                                            OP_ALG_AAI_HMAC_PRECOMP,
10041 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10042                         .geniv = true,
10043                 },
10044         },
10045 @@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
10046                                            OP_ALG_AAI_CTR_MOD128,
10047                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10048                                            OP_ALG_AAI_HMAC_PRECOMP,
10049 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10050                         .rfc3686 = true,
10051                 },
10052         },
10053 @@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
10054                                            OP_ALG_AAI_CTR_MOD128,
10055                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10056                                            OP_ALG_AAI_HMAC_PRECOMP,
10057 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10058                         .rfc3686 = true,
10059                         .geniv = true,
10060                 },
10061 @@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
10062                                            OP_ALG_AAI_CTR_MOD128,
10063                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10064                                            OP_ALG_AAI_HMAC_PRECOMP,
10065 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10066                         .rfc3686 = true,
10067                 },
10068         },
10069 @@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
10070                                            OP_ALG_AAI_CTR_MOD128,
10071                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10072                                            OP_ALG_AAI_HMAC_PRECOMP,
10073 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10074                         .rfc3686 = true,
10075                         .geniv = true,
10076                 },
10077 @@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
10078                                            OP_ALG_AAI_CTR_MOD128,
10079                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10080                                            OP_ALG_AAI_HMAC_PRECOMP,
10081 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10082                         .rfc3686 = true,
10083                 },
10084         },
10085 @@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
10086                                            OP_ALG_AAI_CTR_MOD128,
10087                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10088                                            OP_ALG_AAI_HMAC_PRECOMP,
10089 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10090                         .rfc3686 = true,
10091                         .geniv = true,
10092                 },
10093 @@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
10094                                            OP_ALG_AAI_CTR_MOD128,
10095                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10096                                            OP_ALG_AAI_HMAC_PRECOMP,
10097 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10098                         .rfc3686 = true,
10099                 },
10100         },
10101 @@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
10102                                            OP_ALG_AAI_CTR_MOD128,
10103                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10104                                            OP_ALG_AAI_HMAC_PRECOMP,
10105 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10106                         .rfc3686 = true,
10107                         .geniv = true,
10108                 },
10109 @@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
10110                                            OP_ALG_AAI_CTR_MOD128,
10111                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10112                                            OP_ALG_AAI_HMAC_PRECOMP,
10113 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10114                         .rfc3686 = true,
10115                 },
10116         },
10117 @@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
10118                                            OP_ALG_AAI_CTR_MOD128,
10119                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10120                                            OP_ALG_AAI_HMAC_PRECOMP,
10121 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10122                         .rfc3686 = true,
10123                         .geniv = true,
10124                 },
10125 @@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
10126                                            OP_ALG_AAI_CTR_MOD128,
10127                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10128                                            OP_ALG_AAI_HMAC_PRECOMP,
10129 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10130                         .rfc3686 = true,
10131                 },
10132         },
10133 @@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
10134                                            OP_ALG_AAI_CTR_MOD128,
10135                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10136                                            OP_ALG_AAI_HMAC_PRECOMP,
10137 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10138                         .rfc3686 = true,
10139                         .geniv = true,
10140                 },
10141 @@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
10142  
10143  static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10144  {
10145 +       dma_addr_t dma_addr;
10146 +
10147         ctx->jrdev = caam_jr_alloc();
10148         if (IS_ERR(ctx->jrdev)) {
10149                 pr_err("Job Ring Device allocation for transform failed\n");
10150                 return PTR_ERR(ctx->jrdev);
10151         }
10152  
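+       /*
+        * Map the shared descriptors and the key as a single region. This
+        * assumes struct caam_ctx lays out sh_desc_enc, sh_desc_dec,
+        * sh_desc_givenc and key contiguously, right before sh_desc_enc_dma,
+        * which is what the offsetof() arithmetic below depends on.
+        * DMA_ATTR_SKIP_CPU_SYNC is used because every CPU-side update of a
+        * descriptor or key is expected to be followed by an explicit
+        * dma_sync_single_for_device() call.
+        */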
10153 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10154 +                                       offsetof(struct caam_ctx,
10155 +                                                sh_desc_enc_dma),
10156 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10157 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10158 +               dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10159 +               caam_jr_free(ctx->jrdev);
10160 +               return -ENOMEM;
10161 +       }
10162 +
10163 +       ctx->sh_desc_enc_dma = dma_addr;
10164 +       ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10165 +                                                  sh_desc_dec);
10166 +       ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10167 +                                                     sh_desc_givenc);
10168 +       ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10169 +
10170         /* copy descriptor header template value */
10171 -       ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10172 -       ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10173 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10174 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10175 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10176  
10177         return 0;
10178  }
10179 @@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
10180  
10181  static void caam_exit_common(struct caam_ctx *ctx)
10182  {
10183 -       if (ctx->sh_desc_enc_dma &&
10184 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10185 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10186 -                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10187 -       if (ctx->sh_desc_dec_dma &&
10188 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10189 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10190 -                                desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10191 -       if (ctx->sh_desc_givenc_dma &&
10192 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10193 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10194 -                                desc_bytes(ctx->sh_desc_givenc),
10195 -                                DMA_TO_DEVICE);
10196 -       if (ctx->key_dma &&
10197 -           !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10198 -               dma_unmap_single(ctx->jrdev, ctx->key_dma,
10199 -                                ctx->enckeylen + ctx->split_key_pad_len,
10200 -                                DMA_TO_DEVICE);
10201 -
10202 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10203 +                              offsetof(struct caam_ctx, sh_desc_enc_dma),
10204 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10205         caam_jr_free(ctx->jrdev);
10206  }
10207  
10208 @@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
10209  
10210         t_alg->caam.class1_alg_type = template->class1_alg_type;
10211         t_alg->caam.class2_alg_type = template->class2_alg_type;
10212 -       t_alg->caam.alg_op = template->alg_op;
10213  
10214         return t_alg;
10215  }
10216 --- /dev/null
10217 +++ b/drivers/crypto/caam/caamalg_desc.c
10218 @@ -0,0 +1,1913 @@
10219 +/*
10220 + * Shared descriptors for aead, ablkcipher algorithms
10221 + *
10222 + * Copyright 2016 NXP
10223 + */
10224 +
10225 +#include "compat.h"
10226 +#include "desc_constr.h"
10227 +#include "caamalg_desc.h"
10228 +
10229 +/*
10230 + * For aead functions, read the payload from req->src and write the
10231 + * result to req->dst.
10232 + */
10233 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10234 +{
10235 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10236 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10237 +                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10238 +}
10239 +
10240 +/* Set DK bit in class 1 operation if shared */
10241 +static inline void append_dec_op1(u32 *desc, u32 type)
10242 +{
10243 +       u32 *jump_cmd, *uncond_jump_cmd;
10244 +
10245 +       /* DK bit is valid only for AES */
10246 +       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10247 +               append_operation(desc, type | OP_ALG_AS_INITFINAL |
10248 +                                OP_ALG_DECRYPT);
10249 +               return;
10250 +       }
10251 +
10252 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10253 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10254 +                        OP_ALG_DECRYPT);
10255 +       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10256 +       set_jump_tgt_here(desc, jump_cmd);
10257 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10258 +                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10259 +       set_jump_tgt_here(desc, uncond_jump_cmd);
10260 +}
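+
+/*
+ * Rationale for the branch above: on a fresh run the plain decrypt
+ * operation lets AESA derive the decryption key schedule itself; when the
+ * descriptor re-runs with shared state (JUMP_COND_SHRD), the class 1 key
+ * register is expected to already hold the key in decryption form, so the
+ * OP_ALG_AAI_DK variant is used instead.
+ */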
10261 +
10262 +/**
10263 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10264 + *                               (non-protocol) with no (null) encryption.
10265 + * @desc: pointer to buffer used for descriptor construction
10266 + * @adata: pointer to authentication transform definitions. Note that since a
10267 + *         split key is to be used, the size of the split key itself is
10268 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10269 + *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10270 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10271 + *
10272 + * Note: Requires an MDHA split key.
10273 + */
10274 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10275 +                                unsigned int icvsize)
10276 +{
10277 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10278 +
10279 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10280 +
10281 +       /* Skip if already shared */
10282 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10283 +                                  JUMP_COND_SHRD);
10284 +       if (adata->key_inline)
10285 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10286 +                                 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10287 +                                 KEY_ENC);
10288 +       else
10289 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10290 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10291 +       set_jump_tgt_here(desc, key_jump_cmd);
10292 +
10293 +       /* assoclen + cryptlen = seqinlen */
10294 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10295 +
10296 +       /* Prepare to read and write cryptlen + assoclen bytes */
10297 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10298 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10299 +
10300 +       /*
10301 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10302 +        * thus need to do some magic, i.e. self-patch the descriptor
10303 +        * buffer.
10304 +        */
10305 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10306 +                                   MOVE_DEST_MATH3 |
10307 +                                   (0x6 << MOVE_LEN_SHIFT));
10308 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10309 +                                    MOVE_DEST_DESCBUF |
10310 +                                    MOVE_WAITCOMP |
10311 +                                    (0x8 << MOVE_LEN_SHIFT));
10312 +
10313 +       /* Class 2 operation */
10314 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10315 +                        OP_ALG_ENCRYPT);
10316 +
10317 +       /* Read and write cryptlen bytes */
10318 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10319 +
10320 +       set_move_tgt_here(desc, read_move_cmd);
10321 +       set_move_tgt_here(desc, write_move_cmd);
10322 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10323 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10324 +                   MOVE_AUX_LS);
10325 +
10326 +       /* Write ICV */
10327 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10328 +                        LDST_SRCDST_BYTE_CONTEXT);
10329 +
10330 +#ifdef DEBUG
10331 +       print_hex_dump(KERN_ERR,
10332 +                      "aead null enc shdesc@" __stringify(__LINE__)": ",
10333 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10334 +#endif
10335 +}
10336 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
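+
+/*
+ * Illustrative usage sketch (not part of the original code; "ctx" is a
+ * hypothetical driver context holding the MDHA split key): a caller fills
+ * a struct alginfo and builds the descriptor in place, roughly:
+ *
+ *	struct alginfo adata = {
+ *		.algtype = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ *		.keylen = ctx->adata.keylen,
+ *		.keylen_pad = ctx->adata.keylen_pad,
+ *		.key_virt = ctx->key,
+ *		.key_inline = true,
+ *	};
+ *
+ *	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, &adata, ctx->authsize);
+ */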
10337 +
10338 +/**
10339 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10340 + *                               (non-protocol) with no (null) decryption.
10341 + * @desc: pointer to buffer used for descriptor construction
10342 + * @adata: pointer to authentication transform definitions. Note that since a
10343 + *         split key is to be used, the size of the split key itself is
10344 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10345 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10346 + * @icvsize: integrity check value (ICV) size (truncated or full)
10347 + *
10348 + * Note: Requires an MDHA split key.
10349 + */
10350 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10351 +                                unsigned int icvsize)
10352 +{
10353 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10354 +
10355 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10356 +
10357 +       /* Skip if already shared */
10358 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10359 +                                  JUMP_COND_SHRD);
10360 +       if (adata->key_inline)
10361 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10362 +                                 adata->keylen, CLASS_2 |
10363 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
10364 +       else
10365 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10366 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10367 +       set_jump_tgt_here(desc, key_jump_cmd);
10368 +
10369 +       /* Class 2 operation */
10370 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10371 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10372 +
10373 +       /* assoclen + cryptlen = seqoutlen */
10374 +       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10375 +
10376 +       /* Prepare to read and write cryptlen + assoclen bytes */
10377 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10378 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10379 +
10380 +       /*
10381 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10382 +        * thus need to do some magic, i.e. self-patch the descriptor
10383 +        * buffer.
10384 +        */
10385 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10386 +                                   MOVE_DEST_MATH2 |
10387 +                                   (0x6 << MOVE_LEN_SHIFT));
10388 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10389 +                                    MOVE_DEST_DESCBUF |
10390 +                                    MOVE_WAITCOMP |
10391 +                                    (0x8 << MOVE_LEN_SHIFT));
10392 +
10393 +       /* Read and write cryptlen bytes */
10394 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10395 +
10396 +       /*
10397 +        * Insert a NOP here, since we need at least 4 instructions between
10398 +        * code patching the descriptor buffer and the location being patched.
10399 +        */
10400 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10401 +       set_jump_tgt_here(desc, jump_cmd);
10402 +
10403 +       set_move_tgt_here(desc, read_move_cmd);
10404 +       set_move_tgt_here(desc, write_move_cmd);
10405 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10406 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10407 +                   MOVE_AUX_LS);
10408 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10409 +
10410 +       /* Load ICV */
10411 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10412 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10413 +
10414 +#ifdef DEBUG
10415 +       print_hex_dump(KERN_ERR,
10416 +                      "aead null dec shdesc@" __stringify(__LINE__)": ",
10417 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10418 +#endif
10419 +}
10420 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
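+
+/*
+ * Note on the encap/decap pairing: encap stores the computed ICV with a
+ * SEQ STORE, while decap reloads the trailing icvsize bytes with
+ * FIFOLD_TYPE_ICV under OP_ALG_ICV_ON, so the comparison happens in
+ * hardware and a mismatch fails the job instead of returning a digest.
+ */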
10421 +
10422 +static void init_sh_desc_key_aead(u32 * const desc,
10423 +                                 struct alginfo * const cdata,
10424 +                                 struct alginfo * const adata,
10425 +                                 const bool is_rfc3686, u32 *nonce)
10426 +{
10427 +       u32 *key_jump_cmd;
10428 +       unsigned int enckeylen = cdata->keylen;
10429 +
10430 +       /* Note: Context registers are saved. */
10431 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10432 +
10433 +       /* Skip if already shared */
10434 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10435 +                                  JUMP_COND_SHRD);
10436 +
10437 +       /*
10438 +        * RFC3686 specific:
10439 +        *      | key = {AUTH_KEY, ENC_KEY, NONCE}
10440 +        *      | enckeylen = encryption key size + nonce size
10441 +        */
10442 +       if (is_rfc3686)
10443 +               enckeylen -= CTR_RFC3686_NONCE_SIZE;
10444 +
10445 +       if (adata->key_inline)
10446 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10447 +                                 adata->keylen, CLASS_2 |
10448 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
10449 +       else
10450 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10451 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10452 +
10453 +       if (cdata->key_inline)
10454 +               append_key_as_imm(desc, cdata->key_virt, enckeylen,
10455 +                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10456 +       else
10457 +               append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10458 +                          KEY_DEST_CLASS_REG);
10459 +
10460 +       /* Load Counter into CONTEXT1 reg */
10461 +       if (is_rfc3686) {
10462 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10463 +                                  LDST_CLASS_IND_CCB |
10464 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10465 +               append_move(desc,
10466 +                           MOVE_SRC_OUTFIFO |
10467 +                           MOVE_DEST_CLASS1CTX |
10468 +                           (16 << MOVE_OFFSET_SHIFT) |
10469 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10470 +       }
10471 +
10472 +       set_jump_tgt_here(desc, key_jump_cmd);
10473 +}
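+
+/*
+ * Example (illustrative): for rfc3686(ctr(aes)) with a 16-byte AES key
+ * the caller passes cdata->keylen = 16 + CTR_RFC3686_NONCE_SIZE and a
+ * nonce pointer to the trailing 4 bytes of the key material; only the
+ * first 16 bytes are loaded as the class 1 key, while the nonce is moved
+ * to offset 16 of CONTEXT1.
+ */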
10474 +
10475 +/**
10476 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10477 + *                          (non-protocol).
10478 + * @desc: pointer to buffer used for descriptor construction
10479 + * @cdata: pointer to block cipher transform definitions
10480 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10481 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10482 + * @adata: pointer to authentication transform definitions. Note that since a
10483 + *         split key is to be used, the size of the split key itself is
10484 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10485 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10486 + * @ivsize: initialization vector size
10487 + * @icvsize: integrity check value (ICV) size (truncated or full)
10488 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10489 + * @nonce: pointer to rfc3686 nonce
10490 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10491 + * @is_qi: true when called from caam/qi
10492 + *
10493 + * Note: Requires an MDHA split key.
10494 + */
10495 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10496 +                           struct alginfo *adata, unsigned int ivsize,
10497 +                           unsigned int icvsize, const bool is_rfc3686,
10498 +                           u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
10499 +{
10500 +       /* Note: Context registers are saved. */
10501 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10502 +
10503 +       /* Class 2 operation */
10504 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10505 +                        OP_ALG_ENCRYPT);
10506 +
10507 +       if (is_qi) {
10508 +               u32 *wait_load_cmd;
10509 +
10510 +               /* REG3 = assoclen */
10511 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10512 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10513 +                               (4 << LDST_OFFSET_SHIFT));
10514 +
10515 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10516 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10517 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10518 +                                           JUMP_COND_NIFP);
10519 +               set_jump_tgt_here(desc, wait_load_cmd);
10520 +
10521 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10522 +                               LDST_SRCDST_BYTE_CONTEXT |
10523 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10524 +       }
10525 +
10526 +       /* Read and write assoclen bytes */
10527 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10528 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10529 +
10530 +       /* Skip assoc data */
10531 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10532 +
10533 +       /* read assoc before reading payload */
10534 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10535 +                                     FIFOLDST_VLF);
10536 +
10537 +       /* Load Counter into CONTEXT1 reg */
10538 +       if (is_rfc3686)
10539 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10540 +                                    LDST_SRCDST_BYTE_CONTEXT |
10541 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10542 +                                     LDST_OFFSET_SHIFT));
10543 +
10544 +       /* Class 1 operation */
10545 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10546 +                        OP_ALG_ENCRYPT);
10547 +
10548 +       /* Read and write cryptlen bytes */
10549 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10550 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10551 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10552 +
10553 +       /* Write ICV */
10554 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10555 +                        LDST_SRCDST_BYTE_CONTEXT);
10556 +
10557 +#ifdef DEBUG
10558 +       print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10559 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10560 +#endif
10561 +}
10562 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
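+
+/*
+ * Sketch of how a caller might set key_inline (assumed behaviour, not
+ * mandated by this function): inline the keys whenever the descriptor
+ * still fits the shared descriptor size limit, otherwise reference them
+ * by DMA address, e.g. using desc_inline_query() from desc_constr.h:
+ *
+ *	data_len[0] = adata->keylen_pad;
+ *	data_len[1] = cdata->keylen;
+ *	if (desc_inline_query(DESC_AEAD_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+ *			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
+ *		return -EINVAL;
+ *	adata->key_inline = !!(inl_mask & 1);
+ *	cdata->key_inline = !!(inl_mask & 2);
+ */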
10563 +
10564 +/**
10565 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10566 + *                          (non-protocol).
10567 + * @desc: pointer to buffer used for descriptor construction
10568 + * @cdata: pointer to block cipher transform definitions
10569 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10570 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10571 + * @adata: pointer to authentication transform definitions. Note that since a
10572 + *         split key is to be used, the size of the split key itself is
10573 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10574 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10575 + * @ivsize: initialization vector size
10576 + * @icvsize: integrity check value (ICV) size (truncated or full)
10577 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10578 + * @nonce: pointer to rfc3686 nonce
10579 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10580 + * @is_qi: true when called from caam/qi
10581 + *
10582 + * Note: Requires an MDHA split key.
10583 + */
10584 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10585 +                           struct alginfo *adata, unsigned int ivsize,
10586 +                           unsigned int icvsize, const bool geniv,
10587 +                           const bool is_rfc3686, u32 *nonce,
10588 +                           const u32 ctx1_iv_off, const bool is_qi)
10589 +{
10590 +       /* Note: Context registers are saved. */
10591 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10592 +
10593 +       /* Class 2 operation */
10594 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10595 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10596 +
10597 +       if (is_qi) {
10598 +               u32 *wait_load_cmd;
10599 +
10600 +               /* REG3 = assoclen */
10601 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10602 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10603 +                               (4 << LDST_OFFSET_SHIFT));
10604 +
10605 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10606 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10607 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10608 +                                           JUMP_COND_NIFP);
10609 +               set_jump_tgt_here(desc, wait_load_cmd);
10610 +
10611 +               if (!geniv)
10612 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10613 +                                       LDST_SRCDST_BYTE_CONTEXT |
10614 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10615 +       }
10616 +
10617 +       /* Read and write assoclen bytes */
10618 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10619 +       if (geniv)
10620 +               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
10621 +       else
10622 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10623 +
10624 +       /* Skip assoc data */
10625 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10626 +
10627 +       /* read assoc before reading payload */
10628 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10629 +                            KEY_VLF);
10630 +
10631 +       if (geniv) {
10632 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10633 +                               LDST_SRCDST_BYTE_CONTEXT |
10634 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10635 +               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10636 +                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10637 +       }
10638 +
10639 +       /* Load Counter into CONTEXT1 reg */
10640 +       if (is_rfc3686)
10641 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10642 +                                    LDST_SRCDST_BYTE_CONTEXT |
10643 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10644 +                                     LDST_OFFSET_SHIFT));
10645 +
10646 +       /* Choose operation */
10647 +       if (ctx1_iv_off)
10648 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10649 +                                OP_ALG_DECRYPT);
10650 +       else
10651 +               append_dec_op1(desc, cdata->algtype);
10652 +
10653 +       /* Read and write cryptlen bytes */
10654 +       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10655 +       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10656 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10657 +
10658 +       /* Load ICV */
10659 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10660 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10661 +
10662 +#ifdef DEBUG
10663 +       print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10664 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10665 +#endif
10666 +}
10667 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
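+
+/*
+ * ctx1_iv_off values, as set up by the callers in this patch: 0 for CBC
+ * modes, 16 for plain CTR, and 16 + CTR_RFC3686_NONCE_SIZE (= 20) for
+ * rfc3686, so that CONTEXT1 holds the nonce at bytes 16-19, the IV at
+ * bytes 20-27 and the block counter (loaded above) at bytes 28-31.
+ */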
10668 +
10669 +/**
10670 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10671 + *                             (non-protocol) with HW-generated initialization
10672 + *                             vector.
10673 + * @desc: pointer to buffer used for descriptor construction
10674 + * @cdata: pointer to block cipher transform definitions
10675 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10676 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10677 + * @adata: pointer to authentication transform definitions. Note that since a
10678 + *         split key is to be used, the size of the split key itself is
10679 + *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10680 + *         SHA224, SHA256, SHA384, SHA512} ORed with OP_ALG_AAI_HMAC_PRECOMP.
10681 + * @ivsize: initialization vector size
10682 + * @icvsize: integrity check value (ICV) size (truncated or full)
10683 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10684 + * @nonce: pointer to rfc3686 nonce
10685 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10686 + * @is_qi: true when called from caam/qi
10687 + *
10688 + * Note: Requires an MDHA split key.
10689 + */
10690 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10691 +                              struct alginfo *adata, unsigned int ivsize,
10692 +                              unsigned int icvsize, const bool is_rfc3686,
10693 +                              u32 *nonce, const u32 ctx1_iv_off,
10694 +                              const bool is_qi)
10695 +{
10696 +       u32 geniv, moveiv;
10697 +
10698 +       /* Note: Context registers are saved. */
10699 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10700 +
10701 +       if (is_qi) {
10702 +               u32 *wait_load_cmd;
10703 +
10704 +               /* REG3 = assoclen */
10705 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10706 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10707 +                               (4 << LDST_OFFSET_SHIFT));
10708 +
10709 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10710 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10711 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10712 +                                           JUMP_COND_NIFP);
10713 +               set_jump_tgt_here(desc, wait_load_cmd);
10714 +       }
10715 +
10716 +       if (is_rfc3686) {
10717 +               if (is_qi)
10718 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10719 +                                       LDST_SRCDST_BYTE_CONTEXT |
10720 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10721 +
10722 +               goto copy_iv;
10723 +       }
10724 +
10725 +       /* Generate IV */
10726 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10727 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10728 +               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10729 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10730 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10731 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10732 +       append_move(desc, MOVE_WAITCOMP |
10733 +                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10734 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10735 +                   (ivsize << MOVE_LEN_SHIFT));
10736 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10737 +
10738 +copy_iv:
10739 +       /* Copy IV from class 1 context to the output FIFO */
10740 +       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10741 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10742 +                   (ivsize << MOVE_LEN_SHIFT));
10743 +
10744 +       /* Return to encryption */
10745 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10746 +                        OP_ALG_ENCRYPT);
10747 +
10748 +       /* Read and write assoclen bytes */
10749 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10750 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10751 +
10752 +       /* Skip assoc data */
10753 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10754 +
10755 +       /* read assoc before reading payload */
10756 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10757 +                            KEY_VLF);
10758 +
10759 +       /* Copy iv from outfifo to class 2 fifo */
10760 +       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10761 +                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10762 +       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10763 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10764 +       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10765 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10766 +
10767 +       /* Load Counter into CONTEXT1 reg */
10768 +       if (is_rfc3686)
10769 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10770 +                                    LDST_SRCDST_BYTE_CONTEXT |
10771 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10772 +                                     LDST_OFFSET_SHIFT));
10773 +
10774 +       /* Class 1 operation */
10775 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10776 +                        OP_ALG_ENCRYPT);
10777 +
10778 +       /* Will write ivsize + cryptlen */
10779 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10780 +
10781 +       /* No need to reload the IV: skip it in the input sequence */
10782 +       append_seq_fifo_load(desc, ivsize,
10783 +                            FIFOLD_CLASS_SKIP);
10784 +
10785 +       /* Will read cryptlen */
10786 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10787 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10788 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10789 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10790 +
10791 +       /* Write ICV */
10792 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10793 +                        LDST_SRCDST_BYTE_CONTEXT);
10794 +
10795 +#ifdef DEBUG
10796 +       print_hex_dump(KERN_ERR,
10797 +                      "aead givenc shdesc@" __stringify(__LINE__)": ",
10798 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10799 +#endif
10800 +}
10801 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
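+
+/*
+ * How the IV generation above works: the geniv word written to the info
+ * FIFO programs the padding engine (NFIFOENTRY_STYPE_PAD |
+ * NFIFOENTRY_PTYPE_RND) to emit ivsize random bytes, which are then MOVEd
+ * into CONTEXT1 at ctx1_iv_off; automatic info FIFO entries are disabled
+ * around the move so the random bytes are not consumed as input data.
+ */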
10802 +
10803 +/**
10804 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10805 + * @desc: pointer to buffer used for descriptor construction
10806 + * @cdata: pointer to block cipher transform definitions
10807 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed
10808 + *         with OP_ALG_AAI_CBC
10809 + * @adata: pointer to authentication transform definitions. Note that since a
10810 + *         split key is to be used, the size of the split key itself is
10811 + *         specified. Valid algorithm values - OP_ALG_ALGSEL_SHA1 ORed with
10812 + *         OP_ALG_AAI_HMAC_PRECOMP.
10813 + * @assoclen: associated data length
10814 + * @ivsize: initialization vector size
10815 + * @authsize: authentication data size
10816 + * @blocksize: block cipher size
10817 + */
10818 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10819 +                          struct alginfo *adata, unsigned int assoclen,
10820 +                          unsigned int ivsize, unsigned int authsize,
10821 +                          unsigned int blocksize)
10822 +{
10823 +       u32 *key_jump_cmd, *zero_payload_jump_cmd;
10824 +       u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10825 +
10826 +       /*
10827 +        * Compute the index (in bytes) for the LOAD with destination of
10828 +        * Class 1 Data Size Register and for the LOAD that generates padding
10829 +        */
10830 +       if (adata->key_inline) {
10831 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10832 +                               cdata->keylen - 4 * CAAM_CMD_SZ;
10833 +               idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10834 +                            cdata->keylen - 2 * CAAM_CMD_SZ;
10835 +       } else {
10836 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10837 +                               4 * CAAM_CMD_SZ;
10838 +               idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10839 +                            2 * CAAM_CMD_SZ;
10840 +       }
10841 +
10842 +       stidx = 1 << HDR_START_IDX_SHIFT;
10843 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10844 +
10845 +       /* skip key loading if the keys are already loaded due to sharing */
10846 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10847 +                                  JUMP_COND_SHRD);
10848 +
10849 +       if (adata->key_inline)
10850 +               append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10851 +                                 adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10852 +                                 KEY_ENC);
10853 +       else
10854 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10855 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
10856 +
10857 +       if (cdata->key_inline)
10858 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
10859 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
10860 +       else
10861 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10862 +                          KEY_DEST_CLASS_REG);
10863 +
10864 +       set_jump_tgt_here(desc, key_jump_cmd);
10865 +
10866 +       /* class 2 operation */
10867 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10868 +                        OP_ALG_ENCRYPT);
10869 +       /* class 1 operation */
10870 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10871 +                        OP_ALG_ENCRYPT);
10872 +
10873 +       /* payloadlen = input data length - (assoclen + ivlen) */
10874 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
10875 +
10876 +       /* math1 = payloadlen + icvlen */
10877 +       append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
10878 +
10879 +       /* padlen = block_size - math1 % block_size */
10880 +       append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
10881 +       append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
10882 +
10883 +       /* cryptlen = payloadlen + icvlen + padlen */
10884 +       append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
10885 +
10886 +       /*
10887 +        * update immediate data with the padding length value
10888 +        * for the LOAD in the class 1 data size register.
10889 +        */
10890 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10891 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
10892 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10893 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
10894 +
10895 +       /* overwrite PL field for the padding INFO FIFO entry */
10896 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10897 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
10898 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10899 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
10900 +
10901 +       /* store encrypted payload, icv and padding */
10902 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
10903 +
10904 +       /* if payload length is zero, jump to zero-payload commands */
10905 +       append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
10906 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
10907 +                                           JUMP_COND_MATH_Z);
10908 +
10909 +       /* load iv in context1 */
10910 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10911 +                  LDST_CLASS_1_CCB | ivsize);
10912 +
10913 +       /* read assoc for authentication */
10914 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10915 +                            FIFOLD_TYPE_MSG);
10916 +       /* insnoop payload (feed it to both class 1 and class 2) */
10917 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
10918 +                            FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
10919 +
10920 +       /* jump the zero-payload commands */
10921 +       append_jump(desc, JUMP_TEST_ALL | 3);
10922 +
10923 +       /* zero-payload commands */
10924 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
10925 +
10926 +       /* load iv in context1 */
10927 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10928 +                  LDST_CLASS_1_CCB | ivsize);
10929 +
10930 +       /* assoc data is the only data for authentication */
10931 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10932 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
10933 +
10934 +       /* send icv to encryption */
10935 +       append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
10936 +                   authsize);
10937 +
10938 +       /* update class 1 data size register with padding length */
10939 +       append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
10940 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10941 +
10942 +       /* generate padding and send it to encryption */
10943 +       genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
10944 +             NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
10945 +       append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
10946 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10947 +
10948 +#ifdef DEBUG
10949 +       print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
10950 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
10951 +                      desc_bytes(desc), 1);
10952 +#endif
10953 +}
10954 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
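+
+/*
+ * Worked example of the padding math above (AES-CBC with HMAC-SHA1, i.e.
+ * blocksize = 16, authsize = 20): a 32-byte payload gives
+ * math1 = 32 + 20 = 52, padlen = 16 - (52 & 15) = 12 and cryptlen = 64;
+ * when math1 is already a multiple of 16, a full 16-byte pad block is
+ * generated, as TLS 1.0 requires (each pad byte carries padlen - 1).
+ */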
10955 +
10956 +/**
10957 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
10958 + * @desc: pointer to buffer used for descriptor construction
10959 + * @cdata: pointer to block cipher transform definitions
10960 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed
10961 + *         with OP_ALG_AAI_CBC
10962 + * @adata: pointer to authentication transform definitions. Note that since a
10963 + *         split key is to be used, the size of the split key itself is
10964 + *         specified. Valid algorithm values - OP_ALG_ALGSEL_SHA1 ORed with
10965 + *         OP_ALG_AAI_HMAC_PRECOMP.
10966 + * @assoclen: associated data length
10967 + * @ivsize: initialization vector size
10968 + * @authsize: authentication data size
10969 + * @blocksize: block cipher size
10970 + */
10971 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
10972 +                          struct alginfo *adata, unsigned int assoclen,
10973 +                          unsigned int ivsize, unsigned int authsize,
10974 +                          unsigned int blocksize)
10975 +{
10976 +       u32 stidx, jumpback;
10977 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
10978 +       /*
10979 +        * Pointer Size bool determines the size of address pointers.
10980 +        * false - Pointers fit in one 32-bit word.
10981 +        * true - Pointers fit in two 32-bit words.
10982 +        */
10983 +       static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
10984 +
10985 +       stidx = 1 << HDR_START_IDX_SHIFT;
10986 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10987 +
10988 +       /* skip key loading if the keys are already loaded due to sharing */
10989 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10990 +                                  JUMP_COND_SHRD);
10991 +
10992 +       append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10993 +                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
10994 +
10995 +       append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10996 +                  KEY_DEST_CLASS_REG);
10997 +
10998 +       set_jump_tgt_here(desc, key_jump_cmd);
10999 +
11000 +       /* class 2 operation */
11001 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11002 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11003 +       /* class 1 operation */
11004 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11005 +                        OP_ALG_DECRYPT);
11006 +
11007 +       /* VSIL = input data length - 2 * block_size */
11008 +       append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11009 +                               blocksize);
11010 +
11011 +       /*
11012 +        * payloadlen + icvlen + padlen = input data length - (assoclen +
11013 +        * ivsize)
11014 +        */
11015 +       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11016 +
11017 +       /* skip data to the last but one cipher block */
11018 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11019 +
11020 +       /* load iv for the last cipher block */
11021 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11022 +                  LDST_CLASS_1_CCB | ivsize);
11023 +
11024 +       /* read last cipher block */
11025 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11026 +                            FIFOLD_TYPE_LAST1 | blocksize);
11027 +
11028 +       /* move decrypted block into math0 and math1 */
11029 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11030 +                   blocksize);
11031 +
11032 +       /* reset AES CHA */
11033 +       append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11034 +                           LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11035 +
11036 +       /* rewind input sequence */
11037 +       append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11038 +
11039 +       /* key1 is in decryption form */
11040 +       append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11041 +                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11042 +
11043 +       /* load iv in context1 */
11044 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11045 +                  LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11046 +
11047 +       /* read sequence number */
11048 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11049 +       /* load Type, Version and Len fields in math0 */
11050 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11051 +                  LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11052 +
11053 +       /* compute (padlen - 1) */
11054 +       append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11055 +
11056 +       /* math2 = icvlen + (padlen - 1) + 1 */
11057 +       append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11058 +
11059 +       append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11060 +
11061 +       /* VSOL = payloadlen + icvlen + padlen */
11062 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11063 +
11064 +#ifdef __LITTLE_ENDIAN
11065 +       append_moveb(desc, MOVE_WAITCOMP |
11066 +                    MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11067 +#endif
11068 +       /* update Len field */
11069 +       append_math_sub(desc, REG0, REG0, REG2, 8);
11070 +
11071 +       /* store decrypted payload, icv and padding */
11072 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11073 +
11074 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11075 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11076 +
11077 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11078 +                                           JUMP_COND_MATH_Z);
11079 +
11080 +       /* send Type, Version and Len(pre ICV) fields to authentication */
11081 +       append_move(desc, MOVE_WAITCOMP |
11082 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11083 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11084 +
11085 +       /* outsnooping payload (class 1 output also feeds class 2) */
11086 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11087 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11088 +                            FIFOLDST_VLF);
11089 +       skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11090 +
11091 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11092 +       /* send Type, Version and Len(pre ICV) fields to authentication */
11093 +       append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11094 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11095 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11096 +
11097 +       set_jump_tgt_here(desc, skip_zero_jump_cmd);
11098 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11099 +
11100 +       /* load icvlen and padlen */
11101 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11102 +                            FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11103 +
11104 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11105 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11106 +
11107 +       /*
11108 +        * Start a new input sequence using the SEQ OUT PTR command options,
11109 +        * pointer and length used when the current output sequence was defined.
11110 +        */
11111 +       if (ps) {
11112 +               /*
11113 +                * Move the lower 32 bits of Shared Descriptor address, the
11114 +                * SEQ OUT PTR command, Output Pointer (2 words) and
11115 +                * Output Length into math registers.
11116 +                */
11117 +#ifdef __LITTLE_ENDIAN
11118 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11119 +                           MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11120 +                           20);
11121 +#else
11122 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11123 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11124 +                           20);
11125 +#endif
11126 +               /* Transform SEQ OUT PTR command into a SEQ IN PTR command */
11127 +               append_math_and_imm_u32(desc, REG0, REG0, IMM,
11128 +                                       ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11129 +               /* Append a JUMP command after the copied fields */
11130 +               jumpback = CMD_JUMP | (char)-9;
11131 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11132 +                                   LDST_SRCDST_WORD_DECO_MATH2 |
11133 +                                   (4 << LDST_OFFSET_SHIFT));
11134 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11135 +               /* Move the updated fields back to the Job Descriptor */
11136 +#ifdef __LITTLE_ENDIAN
11137 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11138 +                           MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11139 +                           24);
11140 +#else
11141 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11142 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11143 +                           24);
11144 +#endif
11145 +               /*
11146 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11147 +                * and then jump back to the next command from the
11148 +                * Shared Descriptor.
11149 +                */
11150 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11151 +       } else {
11152 +               /*
11153 +                * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11154 +                * Output Length into math registers.
11155 +                */
11156 +#ifdef __LITTLE_ENDIAN
11157 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11158 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11159 +                           12);
11160 +#else
11161 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11162 +                           MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11163 +                           12);
11164 +#endif
11165 +               /* Transform SEQ OUT PTR command into a SEQ IN PTR command */
11166 +               append_math_and_imm_u64(desc, REG0, REG0, IMM,
11167 +                                       ~(((u64)(CMD_SEQ_IN_PTR ^
11168 +                                                CMD_SEQ_OUT_PTR)) << 32));
11169 +               /* Append a JUMP command after the copied fields */
11170 +               jumpback = CMD_JUMP | (char)-7;
11171 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11172 +                                   LDST_SRCDST_WORD_DECO_MATH1 |
11173 +                                   (4 << LDST_OFFSET_SHIFT));
11174 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11175 +               /* Move the updated fields back to the Job Descriptor */
11176 +#ifdef __LITTLE_ENDIAN
11177 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11178 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11179 +                           16);
11180 +#else
11181 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11182 +                           MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11183 +                           16);
11184 +#endif
11185 +               /*
11186 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11187 +                * and then jump back to the next command in the
11188 +                * Shared Descriptor.
11189 +                */
11190 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11191 +       }
11192 +
11193 +       /* skip payload */
11194 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11195 +       /* check icv */
11196 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11197 +                            FIFOLD_TYPE_LAST2 | authsize);
11198 +
11199 +#ifdef DEBUG
11200 +       print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11201 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
11202 +                      desc_bytes(desc), 1);
11203 +#endif
11204 +}
11205 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
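+/*
+ * Illustrative caller sketch (assumed names, not part of this patch): a
+ * tls10(hmac(sha1),cbc(aes)) transform carrying the 13-byte TLS 1.0
+ * associated data would build its decrypt shared descriptor roughly as:
+ *
+ *     cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ *                           13, AES_BLOCK_SIZE, ctx->authsize,
+ *                           AES_BLOCK_SIZE);
+ */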
11206 +
11207 +/**
11208 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11209 + * @desc: pointer to buffer used for descriptor construction
11210 + * @cdata: pointer to block cipher transform definitions
11211 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11212 + * @ivsize: initialization vector size
11213 + * @icvsize: integrity check value (ICV) size (truncated or full)
11214 + * @is_qi: true when called from caam/qi
11215 + */
11216 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11217 +                          unsigned int ivsize, unsigned int icvsize,
11218 +                          const bool is_qi)
11219 +{
11220 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11221 +           *zero_assoc_jump_cmd2;
11222 +
11223 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11224 +
11225 +       /* skip key loading if the key is already loaded due to sharing */
11226 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11227 +                                  JUMP_COND_SHRD);
11228 +       if (cdata->key_inline)
11229 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11230 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11231 +       else
11232 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11233 +                          KEY_DEST_CLASS_REG);
11234 +       set_jump_tgt_here(desc, key_jump_cmd);
11235 +
11236 +       /* class 1 operation */
11237 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11238 +                        OP_ALG_ENCRYPT);
11239 +
11240 +       if (is_qi) {
11241 +               u32 *wait_load_cmd;
11242 +
11243 +               /* REG3 = assoclen */
11244 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11245 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11246 +                               (4 << LDST_OFFSET_SHIFT));
11247 +
11248 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11249 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11250 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11251 +                                           JUMP_COND_NIFP);
11252 +               set_jump_tgt_here(desc, wait_load_cmd);
11253 +
11254 +               append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11255 +                                       ivsize);
11256 +       } else {
11257 +               append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11258 +                               CAAM_CMD_SZ);
11259 +       }
11260 +
11261 +       /* if assoclen + cryptlen is ZERO, skip to ICV write */
11262 +       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11263 +                                                JUMP_COND_MATH_Z);
11264 +
11265 +       if (is_qi)
11266 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11267 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11268 +
11269 +       /* if assoclen is ZERO, skip reading the assoc data */
11270 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11271 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11272 +                                          JUMP_COND_MATH_Z);
11273 +
11274 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11275 +
11276 +       /* skip assoc data */
11277 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11278 +
11279 +       /* cryptlen = seqinlen - assoclen */
11280 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11281 +
11282 +       /* if cryptlen is ZERO jump to zero-payload commands */
11283 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11284 +                                           JUMP_COND_MATH_Z);
11285 +
11286 +       /* read assoc data */
11287 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11288 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11289 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11290 +
11291 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11292 +
11293 +       /* write encrypted data */
11294 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11295 +
11296 +       /* read payload data */
11297 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11298 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11299 +
11300 +       /* jump to ICV writing */
11301 +       if (is_qi)
11302 +               append_jump(desc, JUMP_TEST_ALL | 4);
11303 +       else
11304 +               append_jump(desc, JUMP_TEST_ALL | 2);
11305 +
11306 +       /* zero-payload commands */
11307 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11308 +
11309 +       /* read assoc data */
11310 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11311 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11312 +       if (is_qi)
11313 +               /* jump to ICV writing */
11314 +               append_jump(desc, JUMP_TEST_ALL | 2);
11315 +
11316 +       /* There is no input data */
11317 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11318 +
11319 +       if (is_qi)
11320 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11321 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11322 +                                    FIFOLD_TYPE_LAST1);
11323 +
11324 +       /* write ICV */
11325 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11326 +                        LDST_SRCDST_BYTE_CONTEXT);
11327 +
11328 +#ifdef DEBUG
11329 +       print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11330 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11331 +#endif
11332 +}
11333 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
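+/*
+ * The three conditional jumps above split the descriptor into the four
+ * cases GCM must handle at run time: assoclen + cryptlen == 0, assoclen
+ * == 0, cryptlen == 0, and the common path with both present; all of
+ * them converge on the final SEQ STORE that writes the ICV.
+ */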
11334 +
11335 +/**
11336 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11337 + * @desc: pointer to buffer used for descriptor construction
11338 + * @cdata: pointer to block cipher transform definitions
11339 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11340 + * @ivsize: initialization vector size
11341 + * @icvsize: integrity check value (ICV) size (truncated or full)
11342 + * @is_qi: true when called from caam/qi
11343 + */
11344 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11345 +                          unsigned int ivsize, unsigned int icvsize,
11346 +                          const bool is_qi)
11347 +{
11348 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11349 +
11350 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11351 +
11352 +       /* skip key loading if the key is already loaded due to sharing */
11353 +       key_jump_cmd = append_jump(desc, JUMP_JSL |
11354 +                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
11355 +       if (cdata->key_inline)
11356 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11357 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11358 +       else
11359 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11360 +                          KEY_DEST_CLASS_REG);
11361 +       set_jump_tgt_here(desc, key_jump_cmd);
11362 +
11363 +       /* class 1 operation */
11364 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11365 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11366 +
11367 +       if (is_qi) {
11368 +               u32 *wait_load_cmd;
11369 +
11370 +               /* REG3 = assoclen */
11371 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11372 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11373 +                               (4 << LDST_OFFSET_SHIFT));
11374 +
11375 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11376 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11377 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11378 +                                           JUMP_COND_NIFP);
11379 +               set_jump_tgt_here(desc, wait_load_cmd);
11380 +
11381 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11382 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11383 +       }
11384 +
11385 +       /* if assoclen is ZERO, skip reading the assoc data */
11386 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11387 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11388 +                                                JUMP_COND_MATH_Z);
11389 +
11390 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11391 +
11392 +       /* skip assoc data */
11393 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11394 +
11395 +       /* read assoc data */
11396 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11397 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11398 +
11399 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11400 +
11401 +       /* cryptlen = seqoutlen - assoclen */
11402 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11403 +
11404 +       /* jump to zero-payload command if cryptlen is zero */
11405 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11406 +                                           JUMP_COND_MATH_Z);
11407 +
11408 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11409 +
11410 +       /* store encrypted data */
11411 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11412 +
11413 +       /* read payload data */
11414 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11415 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11416 +
11417 +       /* zero-payload command */
11418 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11419 +
11420 +       /* read ICV */
11421 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11422 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11423 +
11424 +#ifdef DEBUG
11425 +       print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11426 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11427 +#endif
11428 +}
11429 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11430 +
11431 +/**
11432 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11433 + *                             (non-protocol).
11434 + * @desc: pointer to buffer used for descriptor construction
11435 + * @cdata: pointer to block cipher transform definitions
11436 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11437 + * @ivsize: initialization vector size
11438 + * @icvsize: integrity check value (ICV) size (truncated or full)
11439 + * @is_qi: true when called from caam/qi
11440 + */
11441 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11442 +                              unsigned int ivsize, unsigned int icvsize,
11443 +                              const bool is_qi)
11444 +{
11445 +       u32 *key_jump_cmd;
11446 +
11447 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11448 +
11449 +       /* Skip key loading if it is loaded due to sharing */
11450 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11451 +                                  JUMP_COND_SHRD);
11452 +       if (cdata->key_inline)
11453 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11454 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11455 +       else
11456 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11457 +                          KEY_DEST_CLASS_REG);
11458 +       set_jump_tgt_here(desc, key_jump_cmd);
11459 +
11460 +       /* Class 1 operation */
11461 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11462 +                        OP_ALG_ENCRYPT);
11463 +
11464 +       if (is_qi) {
11465 +               u32 *wait_load_cmd;
11466 +
11467 +               /* REG3 = assoclen */
11468 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11469 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11470 +                               (4 << LDST_OFFSET_SHIFT));
11471 +
11472 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11473 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11474 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11475 +                                           JUMP_COND_NIFP);
11476 +               set_jump_tgt_here(desc, wait_load_cmd);
11477 +
11478 +               /* Read salt and IV */
11479 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11480 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11481 +                                       FIFOLD_TYPE_IV);
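+               /*
+                * Per RFC 4106, the 4-byte salt (stored by setkey right
+                * after the AES key, hence key_virt + keylen) and the
+                * 8-byte per-request IV together form the 96-bit GCM nonce.
+                */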
11482 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11483 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11484 +       }
11485 +
11486 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11487 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11488 +
11489 +       /* Read assoc data */
11490 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11491 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11492 +
11493 +       /* Skip IV */
11494 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11495 +
11496 +       /* Will read cryptlen bytes */
11497 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11498 +
11499 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11500 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11501 +
11502 +       /* Skip assoc data */
11503 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11504 +
11505 +       /* cryptlen = seqoutlen - assoclen */
11506 +       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11507 +
11508 +       /* Write encrypted data */
11509 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11510 +
11511 +       /* Read payload data */
11512 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11513 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11514 +
11515 +       /* Write ICV */
11516 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11517 +                        LDST_SRCDST_BYTE_CONTEXT);
11518 +
11519 +#ifdef DEBUG
11520 +       print_hex_dump(KERN_ERR,
11521 +                      "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11522 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11523 +#endif
11524 +}
11525 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11526 +
11527 +/**
11528 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11529 + *                             (non-protocol).
11530 + * @desc: pointer to buffer used for descriptor construction
11531 + * @cdata: pointer to block cipher transform definitions
11532 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11533 + * @ivsize: initialization vector size
11534 + * @icvsize: integrity check value (ICV) size (truncated or full)
11535 + * @is_qi: true when called from caam/qi
11536 + */
11537 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11538 +                              unsigned int ivsize, unsigned int icvsize,
11539 +                              const bool is_qi)
11540 +{
11541 +       u32 *key_jump_cmd;
11542 +
11543 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11544 +
11545 +       /* Skip key loading if it is loaded due to sharing */
11546 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11547 +                                  JUMP_COND_SHRD);
11548 +       if (cdata->key_inline)
11549 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11550 +                                 cdata->keylen, CLASS_1 |
11551 +                                 KEY_DEST_CLASS_REG);
11552 +       else
11553 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11554 +                          KEY_DEST_CLASS_REG);
11555 +       set_jump_tgt_here(desc, key_jump_cmd);
11556 +
11557 +       /* Class 1 operation */
11558 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11559 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11560 +
11561 +       if (is_qi) {
11562 +               u32 *wait_load_cmd;
11563 +
11564 +               /* REG3 = assoclen */
11565 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11566 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11567 +                               (4 << LDST_OFFSET_SHIFT));
11568 +
11569 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11570 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11571 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11572 +                                           JUMP_COND_NIFP);
11573 +               set_jump_tgt_here(desc, wait_load_cmd);
11574 +
11575 +               /* Read salt and IV */
11576 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11577 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11578 +                                       FIFOLD_TYPE_IV);
11579 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11580 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11581 +       }
11582 +
11583 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11584 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11585 +
11586 +       /* Read assoc data */
11587 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11588 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11589 +
11590 +       /* Skip IV */
11591 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11592 +
11593 +       /* Will read cryptlen bytes */
11594 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11595 +
11596 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11597 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11598 +
11599 +       /* Skip assoc data */
11600 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11601 +
11602 +       /* Will write cryptlen bytes */
11603 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11604 +
11605 +       /* Store payload data */
11606 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11607 +
11608 +       /* Read encrypted data */
11609 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11610 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11611 +
11612 +       /* Read ICV */
11613 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11614 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11615 +
11616 +#ifdef DEBUG
11617 +       print_hex_dump(KERN_ERR,
11618 +                      "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11619 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11620 +#endif
11621 +}
11622 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11623 +
11624 +/**
11625 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11626 + *                             (non-protocol).
11627 + * @desc: pointer to buffer used for descriptor construction
11628 + * @cdata: pointer to block cipher transform definitions
11629 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11630 + * @ivsize: initialization vector size
11631 + * @icvsize: integrity check value (ICV) size (truncated or full)
11632 + * @is_qi: true when called from caam/qi
11633 + */
11634 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11635 +                              unsigned int ivsize, unsigned int icvsize,
11636 +                              const bool is_qi)
11637 +{
11638 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11639 +
11640 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11641 +
11642 +       /* Skip key loading if it is loaded due to sharing */
11643 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11644 +                                  JUMP_COND_SHRD);
11645 +       if (cdata->key_inline)
11646 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11647 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11648 +       else
11649 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11650 +                          KEY_DEST_CLASS_REG);
11651 +       set_jump_tgt_here(desc, key_jump_cmd);
11652 +
11653 +       /* Class 1 operation */
11654 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11655 +                        OP_ALG_ENCRYPT);
11656 +
11657 +       if (is_qi) {
11658 +               /* assoclen is not needed, skip it */
11659 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11660 +
11661 +               /* Read salt and IV */
11662 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11663 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11664 +                                       FIFOLD_TYPE_IV);
11665 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11666 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11667 +       }
11668 +
11669 +       /* assoclen + cryptlen = seqinlen */
11670 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11671 +
11672 +       /*
11673 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
11674 +        * thus need to do some magic, i.e. self-patch the descriptor
11675 +        * buffer.
11676 +        */
11677 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11678 +                                   (0x6 << MOVE_LEN_SHIFT));
11679 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11680 +                                    (0x8 << MOVE_LEN_SHIFT));
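+       /*
+        * read_move_cmd/write_move_cmd are placeholders: their offset
+        * fields are patched by the set_move_tgt_here() calls below, once
+        * the final location of the data to be moved within the descriptor
+        * buffer is known.
+        */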
11681 +
11682 +       /* Will read assoclen + cryptlen bytes */
11683 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11684 +
11685 +       /* Will write assoclen + cryptlen bytes */
11686 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11687 +
11688 +       /* Read and write assoclen + cryptlen bytes */
11689 +       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11690 +
11691 +       set_move_tgt_here(desc, read_move_cmd);
11692 +       set_move_tgt_here(desc, write_move_cmd);
11693 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11694 +       /* Move payload data to OFIFO */
11695 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11696 +
11697 +       /* Write ICV */
11698 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11699 +                        LDST_SRCDST_BYTE_CONTEXT);
11700 +
11701 +#ifdef DEBUG
11702 +       print_hex_dump(KERN_ERR,
11703 +                      "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11704 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11705 +#endif
11706 +}
11707 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11708 +
11709 +/**
11710 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11711 + *                             (non-protocol).
11712 + * @desc: pointer to buffer used for descriptor construction
11713 + * @cdata: pointer to block cipher transform definitions
11714 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11715 + * @ivsize: initialization vector size
11716 + * @icvsize: integrity check value (ICV) size (truncated or full)
11717 + * @is_qi: true when called from caam/qi
11718 + */
11719 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11720 +                              unsigned int ivsize, unsigned int icvsize,
11721 +                              const bool is_qi)
11722 +{
11723 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11724 +
11725 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11726 +
11727 +       /* Skip key loading if it is loaded due to sharing */
11728 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11729 +                                  JUMP_COND_SHRD);
11730 +       if (cdata->key_inline)
11731 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11732 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11733 +       else
11734 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11735 +                          KEY_DEST_CLASS_REG);
11736 +       set_jump_tgt_here(desc, key_jump_cmd);
11737 +
11738 +       /* Class 1 operation */
11739 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11740 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11741 +
11742 +       if (is_qi) {
11743 +               /* assoclen is not needed, skip it */
11744 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11745 +
11746 +               /* Read salt and IV */
11747 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11748 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11749 +                                       FIFOLD_TYPE_IV);
11750 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11751 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11752 +       }
11753 +
11754 +       /* assoclen + cryptlen = seqoutlen */
11755 +       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11756 +
11757 +       /*
11758 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
11759 +        * thus need to do some magic, i.e. self-patch the descriptor
11760 +        * buffer.
11761 +        */
11762 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11763 +                                   (0x6 << MOVE_LEN_SHIFT));
11764 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11765 +                                    (0x8 << MOVE_LEN_SHIFT));
11766 +
11767 +       /* Will read assoclen + cryptlen bytes */
11768 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11769 +
11770 +       /* Will write assoclen + cryptlen bytes */
11771 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11772 +
11773 +       /* Store payload data */
11774 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11775 +
11776 +       /* In-snoop assoclen + cryptlen data */
11777 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11778 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11779 +
11780 +       set_move_tgt_here(desc, read_move_cmd);
11781 +       set_move_tgt_here(desc, write_move_cmd);
11782 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11783 +       /* Move payload data to OFIFO */
11784 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11785 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11786 +
11787 +       /* Read ICV */
11788 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11789 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11790 +
11791 +#ifdef DEBUG
11792 +       print_hex_dump(KERN_ERR,
11793 +                      "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11794 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11795 +#endif
11796 +}
11797 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11798 +
11799 +/*
11800 + * For ablkcipher encrypt and decrypt, read from req->src and
11801 + * write to req->dst
11802 + */
11803 +static inline void ablkcipher_append_src_dst(u32 *desc)
11804 +{
11805 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11806 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11807 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11808 +                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11809 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11810 +}
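+/*
+ * Note: ablkcipher input and output lengths are equal, so both variable
+ * sequence length registers above are derived from SEQ IN LEN; the VLF
+ * FIFO LOAD / FIFO STORE pair then moves exactly the request payload.
+ */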
11811 +
11812 +/**
11813 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11814 + * @desc: pointer to buffer used for descriptor construction
11815 + * @cdata: pointer to block cipher transform definitions
11816 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
11817 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11818 + * @ivsize: initialization vector size
11819 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11820 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11821 + */
11822 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11823 +                                 unsigned int ivsize, const bool is_rfc3686,
11824 +                                 const u32 ctx1_iv_off)
11825 +{
11826 +       u32 *key_jump_cmd;
11827 +
11828 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11829 +       /* Skip if already shared */
11830 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11831 +                                  JUMP_COND_SHRD);
11832 +
11833 +       /* Load class1 key only */
11834 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11835 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11836 +
11837 +       /* Load nonce into CONTEXT1 reg */
11838 +       if (is_rfc3686) {
11839 +               u8 *nonce = cdata->key_virt + cdata->keylen;
11840 +
11841 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11842 +                                  LDST_CLASS_IND_CCB |
11843 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11844 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11845 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11846 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11847 +       }
11848 +
11849 +       set_jump_tgt_here(desc, key_jump_cmd);
11850 +
11851 +       /* Load iv */
11852 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11853 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11854 +
11855 +       /* Load counter into CONTEXT1 reg */
11856 +       if (is_rfc3686)
11857 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11858 +                                    LDST_SRCDST_BYTE_CONTEXT |
11859 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11860 +                                     LDST_OFFSET_SHIFT));
11861 +
11862 +       /* Load operation */
11863 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11864 +                        OP_ALG_ENCRYPT);
11865 +
11866 +       /* Perform operation */
11867 +       ablkcipher_append_src_dst(desc);
11868 +
11869 +#ifdef DEBUG
11870 +       print_hex_dump(KERN_ERR,
11871 +                      "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
11872 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11873 +#endif
11874 +}
11875 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
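+/*
+ * Illustrative sketch (assumed caller state): for ctr(aes) wrapped by the
+ * rfc3686 template, the IV lives in CONTEXT1 at an offset of 16 bytes plus
+ * the nonce size, so a caller would do something like:
+ *
+ *     u32 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ *
+ *     cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata,
+ *                                  CTR_RFC3686_IV_SIZE, true, ctx1_iv_off);
+ */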
11876 +
11877 +/**
11878 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
11879 + * @desc: pointer to buffer used for descriptor construction
11880 + * @cdata: pointer to block cipher transform definitions
11881 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
11882 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11883 + * @ivsize: initialization vector size
11884 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11885 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11886 + */
11887 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
11888 +                                 unsigned int ivsize, const bool is_rfc3686,
11889 +                                 const u32 ctx1_iv_off)
11890 +{
11891 +       u32 *key_jump_cmd;
11892 +
11893 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11894 +       /* Skip if already shared */
11895 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11896 +                                  JUMP_COND_SHRD);
11897 +
11898 +       /* Load class1 key only */
11899 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11900 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11901 +
11902 +       /* Load nonce into CONTEXT1 reg */
11903 +       if (is_rfc3686) {
11904 +               u8 *nonce = cdata->key_virt + cdata->keylen;
11905 +
11906 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11907 +                                  LDST_CLASS_IND_CCB |
11908 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11909 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11910 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11911 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11912 +       }
11913 +
11914 +       set_jump_tgt_here(desc, key_jump_cmd);
11915 +
11916 +       /* load IV */
11917 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11918 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11919 +
11920 +       /* Load counter into CONTEXT1 reg */
11921 +       if (is_rfc3686)
11922 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11923 +                                    LDST_SRCDST_BYTE_CONTEXT |
11924 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11925 +                                     LDST_OFFSET_SHIFT));
11926 +
11927 +       /* Choose operation */
11928 +       if (ctx1_iv_off)
11929 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11930 +                                OP_ALG_DECRYPT);
11931 +       else
11932 +               append_dec_op1(desc, cdata->algtype);
11933 +
11934 +       /* Perform operation */
11935 +       ablkcipher_append_src_dst(desc);
11936 +
11937 +#ifdef DEBUG
11938 +       print_hex_dump(KERN_ERR,
11939 +                      "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
11940 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11941 +#endif
11942 +}
11943 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
11944 +
11945 +/**
11946 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
11947 + *                                   with HW-generated initialization vector.
11948 + * @desc: pointer to buffer used for descriptor construction
11949 + * @cdata: pointer to block cipher transform definitions
11950 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
11951 + *         with OP_ALG_AAI_CBC.
11952 + * @ivsize: initialization vector size
11953 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11954 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11955 + */
11956 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
11957 +                                    unsigned int ivsize, const bool is_rfc3686,
11958 +                                    const u32 ctx1_iv_off)
11959 +{
11960 +       u32 *key_jump_cmd, geniv;
11961 +
11962 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11963 +       /* Skip if already shared */
11964 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11965 +                                  JUMP_COND_SHRD);
11966 +
11967 +       /* Load class1 key only */
11968 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11969 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11970 +
11971 +       /* Load Nonce into CONTEXT1 reg */
11972 +       if (is_rfc3686) {
11973 +               u8 *nonce = cdata->key_virt + cdata->keylen;
11974 +
11975 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11976 +                                  LDST_CLASS_IND_CCB |
11977 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11978 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11979 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11980 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11981 +       }
11982 +       set_jump_tgt_here(desc, key_jump_cmd);
11983 +
11984 +       /* Generate IV */
11985 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
11986 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
11987 +               (ivsize << NFIFOENTRY_DLEN_SHIFT);
11988 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
11989 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11990 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11991 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
11992 +                   MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
11993 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
11994 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
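+       /*
+        * The NFIFO entry above requests ivsize random bytes from the pad
+        * engine (NFIFOENTRY_PTYPE_RND); automatic info FIFO entries are
+        * disabled while the generated IV is moved into CONTEXT1 at the IV
+        * offset, then re-enabled for normal sequence processing.
+        */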
11995 +
11996 +       /* Copy generated IV to memory */
11997 +       append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11998 +                        LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11999 +
12000 +       /* Load Counter into CONTEXT1 reg */
12001 +       if (is_rfc3686)
12002 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12003 +                                    LDST_SRCDST_BYTE_CONTEXT |
12004 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12005 +                                     LDST_OFFSET_SHIFT));
12006 +
12007 +       if (ctx1_iv_off)
12008 +               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12009 +                           (1 << JUMP_OFFSET_SHIFT));
12010 +
12011 +       /* Load operation */
12012 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12013 +                        OP_ALG_ENCRYPT);
12014 +
12015 +       /* Perform operation */
12016 +       ablkcipher_append_src_dst(desc);
12017 +
12018 +#ifdef DEBUG
12019 +       print_hex_dump(KERN_ERR,
12020 +                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12021 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12022 +#endif
12023 +}
12024 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12025 +
12026 +/**
12027 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12028 + *                                    descriptor
12029 + * @desc: pointer to buffer used for descriptor construction
12030 + * @cdata: pointer to block cipher transform definitions
12031 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12032 + */
12033 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12034 +{
12035 +       __be64 sector_size = cpu_to_be64(512);
12036 +       u32 *key_jump_cmd;
12037 +
12038 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12039 +       /* Skip if already shared */
12040 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12041 +                                  JUMP_COND_SHRD);
12042 +
12043 +       /* Load class1 keys only */
12044 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12045 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12046 +
12047 +       /* Load sector size with index 40 bytes (0x28) */
12048 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12049 +                          LDST_SRCDST_BYTE_CONTEXT |
12050 +                          (0x28 << LDST_OFFSET_SHIFT));
12051 +
12052 +       set_jump_tgt_here(desc, key_jump_cmd);
12053 +
12054 +       /*
12055 +        * create sequence for loading the sector index
12056 +        * Upper 8B of IV - will be used as sector index
12057 +        * Lower 8B of IV - will be discarded
12058 +        */
12059 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12060 +                       (0x20 << LDST_OFFSET_SHIFT));
12061 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
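+       /*
+        * The sector index loaded at context offset 0x20 sits directly
+        * before the sector size loaded above at 0x28; the XTS hardware
+        * uses the pair to derive the tweak for each 512-byte sector.
+        */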
12062 +
12063 +       /* Load operation */
12064 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12065 +                        OP_ALG_ENCRYPT);
12066 +
12067 +       /* Perform operation */
12068 +       ablkcipher_append_src_dst(desc);
12069 +
12070 +#ifdef DEBUG
12071 +       print_hex_dump(KERN_ERR,
12072 +                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12073 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12074 +#endif
12075 +}
12076 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12077 +
12078 +/**
12079 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12080 + *                                    descriptor
12081 + * @desc: pointer to buffer used for descriptor construction
12082 + * @cdata: pointer to block cipher transform definitions
12083 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12084 + */
12085 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12086 +{
12087 +       __be64 sector_size = cpu_to_be64(512);
12088 +       u32 *key_jump_cmd;
12089 +
12090 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12091 +       /* Skip if already shared */
12092 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12093 +                                  JUMP_COND_SHRD);
12094 +
12095 +       /* Load class1 key only */
12096 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12097 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12098 +
12099 +       /* Load sector size with index 40 bytes (0x28) */
12100 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12101 +                          LDST_SRCDST_BYTE_CONTEXT |
12102 +                          (0x28 << LDST_OFFSET_SHIFT));
12103 +
12104 +       set_jump_tgt_here(desc, key_jump_cmd);
12105 +
12106 +       /*
12107 +        * create sequence for loading the sector index
12108 +        * Upper 8B of IV - will be used as sector index
12109 +        * Lower 8B of IV - will be discarded
12110 +        */
12111 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12112 +                       (0x20 << LDST_OFFSET_SHIFT));
12113 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12114 +
12115 +       /* Load operation */
12116 +       append_dec_op1(desc, cdata->algtype);
12117 +
12118 +       /* Perform operation */
12119 +       ablkcipher_append_src_dst(desc);
12120 +
12121 +#ifdef DEBUG
12122 +       print_hex_dump(KERN_ERR,
12123 +                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12124 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12125 +#endif
12126 +}
12127 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12128 +
12129 +MODULE_LICENSE("GPL");
12130 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12131 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12132 --- /dev/null
12133 +++ b/drivers/crypto/caam/caamalg_desc.h
12134 @@ -0,0 +1,127 @@
12135 +/*
12136 + * Shared descriptors for aead, ablkcipher algorithms
12137 + *
12138 + * Copyright 2016 NXP
12139 + */
12140 +
12141 +#ifndef _CAAMALG_DESC_H_
12142 +#define _CAAMALG_DESC_H_
12143 +
12144 +/* length of descriptors text */
12145 +#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
12146 +#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12147 +#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12148 +#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12149 +#define DESC_QI_AEAD_ENC_LEN           (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12150 +#define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12151 +#define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12152 +
12153 +#define DESC_TLS_BASE                  (4 * CAAM_CMD_SZ)
12154 +#define DESC_TLS10_ENC_LEN             (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12155 +
12156 +/* Note: Nonce is counted in cdata.keylen */
12157 +#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
12158 +
12159 +#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
12160 +#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12161 +#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12162 +
12163 +#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
12164 +#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12165 +#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12166 +#define DESC_QI_GCM_ENC_LEN            (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12167 +#define DESC_QI_GCM_DEC_LEN            (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12168 +
12169 +#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
12170 +#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12171 +#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12172 +#define DESC_QI_RFC4106_ENC_LEN                (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12173 +#define DESC_QI_RFC4106_DEC_LEN                (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12174 +
12175 +#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
12176 +#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12177 +#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12178 +#define DESC_QI_RFC4543_ENC_LEN                (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12179 +#define DESC_QI_RFC4543_DEC_LEN                (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12180 +
12181 +#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
12182 +#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
12183 +                                        20 * CAAM_CMD_SZ)
12184 +#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
12185 +                                        15 * CAAM_CMD_SZ)
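+/*
+ * For reference, CAAM_CMD_SZ is 4 bytes, so e.g. DESC_QI_GCM_ENC_LEN works
+ * out to (3 + 16 + 6) * 4 = 100 bytes; callers compare such values against
+ * the available descriptor space when deciding whether keys can be inlined
+ * (see desc_inline_query()).
+ */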
12186 +
12187 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12188 +                                unsigned int icvsize);
12189 +
12190 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12191 +                                unsigned int icvsize);
12192 +
12193 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12194 +                           struct alginfo *adata, unsigned int ivsize,
12195 +                           unsigned int icvsize, const bool is_rfc3686,
12196 +                           u32 *nonce, const u32 ctx1_iv_off,
12197 +                           const bool is_qi);
12198 +
12199 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12200 +                           struct alginfo *adata, unsigned int ivsize,
12201 +                           unsigned int icvsize, const bool geniv,
12202 +                           const bool is_rfc3686, u32 *nonce,
12203 +                           const u32 ctx1_iv_off, const bool is_qi);
12204 +
12205 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12206 +                              struct alginfo *adata, unsigned int ivsize,
12207 +                              unsigned int icvsize, const bool is_rfc3686,
12208 +                              u32 *nonce, const u32 ctx1_iv_off,
12209 +                              const bool is_qi);
12210 +
12211 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12212 +                          struct alginfo *adata, unsigned int assoclen,
12213 +                          unsigned int ivsize, unsigned int authsize,
12214 +                          unsigned int blocksize);
12215 +
12216 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12217 +                          struct alginfo *adata, unsigned int assoclen,
12218 +                          unsigned int ivsize, unsigned int authsize,
12219 +                          unsigned int blocksize);
12220 +
12221 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12222 +                          unsigned int ivsize, unsigned int icvsize,
12223 +                          const bool is_qi);
12224 +
12225 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12226 +                          unsigned int ivsize, unsigned int icvsize,
12227 +                          const bool is_qi);
12228 +
12229 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12230 +                              unsigned int ivsize, unsigned int icvsize,
12231 +                              const bool is_qi);
12232 +
12233 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12234 +                              unsigned int ivsize, unsigned int icvsize,
12235 +                              const bool is_qi);
12236 +
12237 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12238 +                              unsigned int ivsize, unsigned int icvsize,
12239 +                              const bool is_qi);
12240 +
12241 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12242 +                              unsigned int ivsize, unsigned int icvsize,
12243 +                              const bool is_qi);
12244 +
12245 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12246 +                                 unsigned int ivsize, const bool is_rfc3686,
12247 +                                 const u32 ctx1_iv_off);
12248 +
12249 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12250 +                                 unsigned int ivsize, const bool is_rfc3686,
12251 +                                 const u32 ctx1_iv_off);
12252 +
12253 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12254 +                                    unsigned int ivsize, const bool is_rfc3686,
12255 +                                    const u32 ctx1_iv_off);
12256 +
12257 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12258 +
12259 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12260 +
12261 +#endif /* _CAAMALG_DESC_H_ */
12262 --- /dev/null
12263 +++ b/drivers/crypto/caam/caamalg_qi.c
12264 @@ -0,0 +1,2877 @@
12265 +/*
12266 + * Freescale FSL CAAM support for crypto API over QI backend.
12267 + * Based on caamalg.c
12268 + *
12269 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12270 + * Copyright 2016-2017 NXP
12271 + */
12272 +
12273 +#include "compat.h"
12274 +#include "ctrl.h"
12275 +#include "regs.h"
12276 +#include "intern.h"
12277 +#include "desc_constr.h"
12278 +#include "error.h"
12279 +#include "sg_sw_qm.h"
12280 +#include "key_gen.h"
12281 +#include "qi.h"
12282 +#include "jr.h"
12283 +#include "caamalg_desc.h"
12284 +
12285 +/*
12286 + * crypto alg
12287 + */
12288 +#define CAAM_CRA_PRIORITY              2000
12289 +/* max key is sum of AES_MAX_KEY_SIZE and max split key size */
12290 +#define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
12291 +                                        SHA512_DIGEST_SIZE * 2)
12292 +
12293 +#define DESC_MAX_USED_BYTES            (DESC_QI_AEAD_GIVENC_LEN + \
12294 +                                        CAAM_MAX_KEY_SIZE)
12295 +#define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
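+/*
+ * DESC_MAX_USED_BYTES covers the largest QI shared descriptor (AEAD
+ * givencrypt) plus worst-case inlined key material; dividing by CAAM_CMD_SZ
+ * expresses it in 32-bit words, sizing the sh_desc_* arrays in struct
+ * caam_ctx below.
+ */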
12296 +
12297 +struct caam_alg_entry {
12298 +       int class1_alg_type;
12299 +       int class2_alg_type;
12300 +       bool rfc3686;
12301 +       bool geniv;
12302 +};
12303 +
12304 +struct caam_aead_alg {
12305 +       struct aead_alg aead;
12306 +       struct caam_alg_entry caam;
12307 +       bool registered;
12308 +};
12309 +
12310 +/*
12311 + * per-session context
12312 + */
12313 +struct caam_ctx {
12314 +       struct device *jrdev;
12315 +       u32 sh_desc_enc[DESC_MAX_USED_LEN];
12316 +       u32 sh_desc_dec[DESC_MAX_USED_LEN];
12317 +       u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12318 +       u8 key[CAAM_MAX_KEY_SIZE];
12319 +       dma_addr_t key_dma;
12320 +       struct alginfo adata;
12321 +       struct alginfo cdata;
12322 +       unsigned int authsize;
12323 +       struct device *qidev;
12324 +       spinlock_t lock;        /* Protects multiple init of driver context */
12325 +       struct caam_drv_ctx *drv_ctx[NUM_OP];
12326 +};
12327 +
12328 +static int aead_set_sh_desc(struct crypto_aead *aead)
12329 +{
12330 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12331 +                                                typeof(*alg), aead);
12332 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12333 +       unsigned int ivsize = crypto_aead_ivsize(aead);
12334 +       u32 ctx1_iv_off = 0;
12335 +       u32 *nonce = NULL;
12336 +       unsigned int data_len[2];
12337 +       u32 inl_mask;
12338 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12339 +                              OP_ALG_AAI_CTR_MOD128);
12340 +       const bool is_rfc3686 = alg->caam.rfc3686;
12341 +
12342 +       if (!ctx->cdata.keylen || !ctx->authsize)
12343 +               return 0;
12344 +
12345 +       /*
12346 +        * AES-CTR needs to load IV in CONTEXT1 reg
12347 +        * at an offset of 128 bits (16 bytes)
12348 +        * CONTEXT1[255:128] = IV
12349 +        */
12350 +       if (ctr_mode)
12351 +               ctx1_iv_off = 16;
12352 +
12353 +       /*
12354 +        * RFC3686 specific:
12355 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12356 +        */
12357 +       if (is_rfc3686) {
12358 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12359 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12360 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12361 +       }
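+
+       /*
+        * Resulting CONTEXT1 layout for RFC3686, for reference (bit
+        * ranges follow the convention of the comments above):
+        *   CONTEXT1[255:224] = 4-byte nonce, taken from the end of the key
+        *   CONTEXT1[223:160] = 8-byte IV
+        *   CONTEXT1[159:128] = 4-byte block counter, initialized to 1
+        * which is why the IV offset becomes 16 + CTR_RFC3686_NONCE_SIZE.
+        */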
12362 +
12363 +       data_len[0] = ctx->adata.keylen_pad;
12364 +       data_len[1] = ctx->cdata.keylen;
12365 +
12366 +       if (alg->caam.geniv)
12367 +               goto skip_enc;
12368 +
12369 +       /* aead_encrypt shared descriptor */
12370 +       if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12371 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12372 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12373 +                             ARRAY_SIZE(data_len)) < 0)
12374 +               return -EINVAL;
12375 +
12376 +       if (inl_mask & 1)
12377 +               ctx->adata.key_virt = ctx->key;
12378 +       else
12379 +               ctx->adata.key_dma = ctx->key_dma;
12380 +
12381 +       if (inl_mask & 2)
12382 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12383 +       else
12384 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12385 +
12386 +       ctx->adata.key_inline = !!(inl_mask & 1);
12387 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12388 +
12389 +       cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12390 +                              ivsize, ctx->authsize, is_rfc3686, nonce,
12391 +                              ctx1_iv_off, true);
12392 +
12393 +skip_enc:
12394 +       /* aead_decrypt shared descriptor */
12395 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12396 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12397 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12398 +                             ARRAY_SIZE(data_len)) < 0)
12399 +               return -EINVAL;
12400 +
12401 +       if (inl_mask & 1)
12402 +               ctx->adata.key_virt = ctx->key;
12403 +       else
12404 +               ctx->adata.key_dma = ctx->key_dma;
12405 +
12406 +       if (inl_mask & 2)
12407 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12408 +       else
12409 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12410 +
12411 +       ctx->adata.key_inline = !!(inl_mask & 1);
12412 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12413 +
12414 +       cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12415 +                              ivsize, ctx->authsize, alg->caam.geniv,
12416 +                              is_rfc3686, nonce, ctx1_iv_off, true);
12417 +
12418 +       if (!alg->caam.geniv)
12419 +               goto skip_givenc;
12420 +
12421 +       /* aead_givencrypt shared descriptor */
12422 +       if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12423 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12424 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12425 +                             ARRAY_SIZE(data_len)) < 0)
12426 +               return -EINVAL;
12427 +
12428 +       if (inl_mask & 1)
12429 +               ctx->adata.key_virt = ctx->key;
12430 +       else
12431 +               ctx->adata.key_dma = ctx->key_dma;
12432 +
12433 +       if (inl_mask & 2)
12434 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12435 +       else
12436 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12437 +
12438 +       ctx->adata.key_inline = !!(inl_mask & 1);
12439 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12440 +
12441 +       cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12442 +                                 ivsize, ctx->authsize, is_rfc3686, nonce,
12443 +                                 ctx1_iv_off, true);
12444 +
12445 +skip_givenc:
12446 +       return 0;
12447 +}
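+
+/*
+ * A note on the desc_inline_query() pattern used above: data_len[0] is
+ * the split (authentication) key length and data_len[1] the encryption
+ * key length.  Bit 0 (resp. bit 1) of inl_mask is set when the auth
+ * (resp. enc) key still fits in the shared descriptor alongside the
+ * fixed commands; an inlined key is copied into the descriptor as
+ * immediate data, otherwise it is referenced through its DMA address,
+ * which is what the key_virt/key_dma assignments select.
+ */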
12448 +
12449 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12450 +{
12451 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12452 +
12453 +       ctx->authsize = authsize;
12454 +       aead_set_sh_desc(authenc);
12455 +
12456 +       return 0;
12457 +}
12458 +
12459 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12460 +                      unsigned int keylen)
12461 +{
12462 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12463 +       struct device *jrdev = ctx->jrdev;
12464 +       struct crypto_authenc_keys keys;
12465 +       int ret = 0;
12466 +
12467 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12468 +               goto badkey;
12469 +
12470 +#ifdef DEBUG
12471 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12472 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12473 +               keys.authkeylen);
12474 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12475 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12476 +#endif
12477 +
12478 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12479 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12480 +                           keys.enckeylen);
12481 +       if (ret)
12482 +               goto badkey;
12483 +
12484 +       /* append encryption key to auth split key */
12485 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12486 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12487 +                                  keys.enckeylen, DMA_TO_DEVICE);
12488 +#ifdef DEBUG
12489 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12490 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12491 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12492 +#endif
12493 +
12494 +       ctx->cdata.keylen = keys.enckeylen;
12495 +
12496 +       ret = aead_set_sh_desc(aead);
12497 +       if (ret)
12498 +               goto badkey;
12499 +
12500 +       /* Now update the driver contexts with the new shared descriptor */
12501 +       if (ctx->drv_ctx[ENCRYPT]) {
12502 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12503 +                                         ctx->sh_desc_enc);
12504 +               if (ret) {
12505 +                       dev_err(jrdev, "driver enc context update failed\n");
12506 +                       goto badkey;
12507 +               }
12508 +       }
12509 +
12510 +       if (ctx->drv_ctx[DECRYPT]) {
12511 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12512 +                                         ctx->sh_desc_dec);
12513 +               if (ret) {
12514 +                       dev_err(jrdev, "driver dec context update failed\n");
12515 +                       goto badkey;
12516 +               }
12517 +       }
12518 +
12519 +       return ret;
12520 +badkey:
12521 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12522 +       return -EINVAL;
12523 +}
12524 +
12525 +static int tls_set_sh_desc(struct crypto_aead *tls)
12526 +{
12527 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12528 +       unsigned int ivsize = crypto_aead_ivsize(tls);
12529 +       unsigned int blocksize = crypto_aead_blocksize(tls);
12530 +       unsigned int assoclen = 13; /* always 13 bytes for TLS */
12531 +       unsigned int data_len[2];
12532 +       u32 inl_mask;
12533 +
12534 +       if (!ctx->cdata.keylen || !ctx->authsize)
12535 +               return 0;
12536 +
12537 +       /*
12538 +        * TLS 1.0 encrypt shared descriptor
12539 +        * Job Descriptor and Shared Descriptor
12540 +        * must fit into the 64-word Descriptor h/w Buffer
12541 +        */
12542 +       data_len[0] = ctx->adata.keylen_pad;
12543 +       data_len[1] = ctx->cdata.keylen;
12544 +
12545 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12546 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
12547 +               return -EINVAL;
12548 +
12549 +       if (inl_mask & 1)
12550 +               ctx->adata.key_virt = ctx->key;
12551 +       else
12552 +               ctx->adata.key_dma = ctx->key_dma;
12553 +
12554 +       if (inl_mask & 2)
12555 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12556 +       else
12557 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12558 +
12559 +       ctx->adata.key_inline = !!(inl_mask & 1);
12560 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12561 +
12562 +       cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12563 +                             assoclen, ivsize, ctx->authsize, blocksize);
12564 +
12565 +       /*
12566 +        * TLS 1.0 decrypt shared descriptor
12567 +        * Keys do not fit inline, regardless of algorithms used
12568 +        */
12569 +       ctx->adata.key_dma = ctx->key_dma;
12570 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12571 +
12572 +       cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12573 +                             assoclen, ivsize, ctx->authsize, blocksize);
12574 +
12575 +       return 0;
12576 +}
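+
+/*
+ * The fixed 13-byte assoclen used above is the TLS 1.0 MAC
+ * pseudo-header: an 8-byte sequence number, 1-byte content type,
+ * 2-byte protocol version and 2-byte record length (RFC 2246).
+ */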
12577 +
12578 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12579 +{
12580 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12581 +
12582 +       ctx->authsize = authsize;
12583 +       tls_set_sh_desc(tls);
12584 +
12585 +       return 0;
12586 +}
12587 +
12588 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12589 +                     unsigned int keylen)
12590 +{
12591 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12592 +       struct device *jrdev = ctx->jrdev;
12593 +       struct crypto_authenc_keys keys;
12594 +       int ret = 0;
12595 +
12596 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12597 +               goto badkey;
12598 +
12599 +#ifdef DEBUG
12600 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12601 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12602 +               keys.authkeylen);
12603 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12604 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12605 +#endif
12606 +
12607 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12608 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12609 +                           keys.enckeylen);
12610 +       if (ret)
12611 +               goto badkey;
12612 +
12613 +       /* append encryption key to auth split key */
12614 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12615 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12616 +                                  keys.enckeylen, DMA_TO_DEVICE);
12617 +
12618 +#ifdef DEBUG
12619 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12620 +               ctx->adata.keylen, ctx->adata.keylen_pad);
12621 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12622 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12623 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12624 +#endif
12625 +
12626 +       ctx->cdata.keylen = keys.enckeylen;
12627 +
12628 +       ret = tls_set_sh_desc(tls);
12629 +       if (ret)
12630 +               goto badkey;
12631 +
12632 +       /* Now update the driver contexts with the new shared descriptor */
12633 +       if (ctx->drv_ctx[ENCRYPT]) {
12634 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12635 +                                         ctx->sh_desc_enc);
12636 +               if (ret) {
12637 +                       dev_err(jrdev, "driver enc context update failed\n");
12638 +                       goto badkey;
12639 +               }
12640 +       }
12641 +
12642 +       if (ctx->drv_ctx[DECRYPT]) {
12643 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12644 +                                         ctx->sh_desc_dec);
12645 +               if (ret) {
12646 +                       dev_err(jrdev, "driver dec context update failed\n");
12647 +                       goto badkey;
12648 +               }
12649 +       }
12650 +
12651 +       return ret;
12652 +badkey:
12653 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12654 +       return -EINVAL;
12655 +}
12656 +
12657 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12658 +                            const u8 *key, unsigned int keylen)
12659 +{
12660 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12661 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12662 +       const char *alg_name = crypto_tfm_alg_name(tfm);
12663 +       struct device *jrdev = ctx->jrdev;
12664 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12665 +       u32 ctx1_iv_off = 0;
12666 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12667 +                              OP_ALG_AAI_CTR_MOD128);
12668 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12669 +       int ret = 0;
12670 +
12671 +       memcpy(ctx->key, key, keylen);
12672 +#ifdef DEBUG
12673 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12674 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12675 +#endif
12676 +       /*
12677 +        * AES-CTR needs to load IV in CONTEXT1 reg
12678 +        * at an offset of 128 bits (16 bytes)
12679 +        * CONTEXT1[255:128] = IV
12680 +        */
12681 +       if (ctr_mode)
12682 +               ctx1_iv_off = 16;
12683 +
12684 +       /*
12685 +        * RFC3686 specific:
12686 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12687 +        *      | *key = {KEY, NONCE}
12688 +        */
12689 +       if (is_rfc3686) {
12690 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12691 +               keylen -= CTR_RFC3686_NONCE_SIZE;
12692 +       }
12693 +
12694 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12695 +       ctx->cdata.keylen = keylen;
12696 +       ctx->cdata.key_virt = ctx->key;
12697 +       ctx->cdata.key_inline = true;
12698 +
12699 +       /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12700 +       cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12701 +                                    is_rfc3686, ctx1_iv_off);
12702 +       cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12703 +                                    is_rfc3686, ctx1_iv_off);
12704 +       cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12705 +                                       ivsize, is_rfc3686, ctx1_iv_off);
12706 +
12707 +       /* Now update the driver contexts with the new shared descriptor */
12708 +       if (ctx->drv_ctx[ENCRYPT]) {
12709 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12710 +                                         ctx->sh_desc_enc);
12711 +               if (ret) {
12712 +                       dev_err(jrdev, "driver enc context update failed\n");
12713 +                       goto badkey;
12714 +               }
12715 +       }
12716 +
12717 +       if (ctx->drv_ctx[DECRYPT]) {
12718 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12719 +                                         ctx->sh_desc_dec);
12720 +               if (ret) {
12721 +                       dev_err(jrdev, "driver dec context update failed\n");
12722 +                       goto badkey;
12723 +               }
12724 +       }
12725 +
12726 +       if (ctx->drv_ctx[GIVENCRYPT]) {
12727 +               ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12728 +                                         ctx->sh_desc_givenc);
12729 +               if (ret) {
12730 +                       dev_err(jrdev, "driver givenc context update failed\n");
12731 +                       goto badkey;
12732 +               }
12733 +       }
12734 +
12735 +       return ret;
12736 +badkey:
12737 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12738 +       return -EINVAL;
12739 +}
12740 +
12741 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12742 +                                const u8 *key, unsigned int keylen)
12743 +{
12744 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12745 +       struct device *jrdev = ctx->jrdev;
12746 +       int ret = 0;
12747 +
12748 +       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
12749 +               crypto_ablkcipher_set_flags(ablkcipher,
12750 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
12751 +               dev_err(jrdev, "key size mismatch\n");
12752 +               return -EINVAL;
12753 +       }
12754 +
12755 +       memcpy(ctx->key, key, keylen);
12756 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12757 +       ctx->cdata.keylen = keylen;
12758 +       ctx->cdata.key_virt = ctx->key;
12759 +       ctx->cdata.key_inline = true;
12760 +
12761 +       /* xts ablkcipher encrypt, decrypt shared descriptors */
12762 +       cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12763 +       cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12764 +
12765 +       /* Now update the driver contexts with the new shared descriptor */
12766 +       if (ctx->drv_ctx[ENCRYPT]) {
12767 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12768 +                                         ctx->sh_desc_enc);
12769 +               if (ret) {
12770 +                       dev_err(jrdev, "driver enc context update failed\n");
12771 +                       goto badkey;
12772 +               }
12773 +       }
12774 +
12775 +       if (ctx->drv_ctx[DECRYPT]) {
12776 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12777 +                                         ctx->sh_desc_dec);
12778 +               if (ret) {
12779 +                       dev_err(jrdev, "driver dec context update failed\n");
12780 +                       goto badkey;
12781 +               }
12782 +       }
12783 +
12784 +       return ret;
12785 +badkey:
12786 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12787 +       return -EINVAL;
12788 +}
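+
+/*
+ * XTS consumes two AES keys of equal size, hence the only key lengths
+ * accepted above are 2 * 16 = 32 bytes (XTS-AES-128) and
+ * 2 * 32 = 64 bytes (XTS-AES-256).
+ */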
12789 +
12790 +/*
12791 + * aead_edesc - s/w-extended aead descriptor
12792 + * @src_nents: number of segments in input scatterlist
12793 + * @dst_nents: number of segments in output scatterlist
12794 + * @iv_dma: dma address of iv for checking continuity and link table
12795 + * @qm_sg_bytes: length of dma mapped h/w link table
12796 + * @qm_sg_dma: bus physical mapped address of h/w link table
12797 + * @assoclen: associated data length, in CAAM endianness
12798 + * @assoclen_dma: bus physical mapped address of req->assoclen
12799 + * @drv_req: driver-specific request structure
12800 + * @sgt: the h/w link table
12801 + */
12802 +struct aead_edesc {
12803 +       int src_nents;
12804 +       int dst_nents;
12805 +       dma_addr_t iv_dma;
12806 +       int qm_sg_bytes;
12807 +       dma_addr_t qm_sg_dma;
12808 +       unsigned int assoclen;
12809 +       dma_addr_t assoclen_dma;
12810 +       struct caam_drv_req drv_req;
12811 +#define CAAM_QI_MAX_AEAD_SG                                            \
12812 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
12813 +        sizeof(struct qm_sg_entry))
12814 +       struct qm_sg_entry sgt[0];
12815 +};
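+
+/*
+ * CAAM_QI_MAX_AEAD_SG above is simply the number of qm_sg_entry slots
+ * left in a CAAM_QI_MEMCACHE_SIZE allocation once the fixed part of
+ * struct aead_edesc is accounted for, since sgt[] is carved out of the
+ * same qi_cache allocation as the edesc itself.
+ */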
12816 +
12817 +/*
12818 + * tls_edesc - s/w-extended tls descriptor
12819 + * @src_nents: number of segments in input scatterlist
12820 + * @dst_nents: number of segments in output scatterlist
12821 + * @iv_dma: dma address of iv for checking continuity and link table
12822 + * @qm_sg_bytes: length of dma mapped h/w link table
12823 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
12824 + * @qm_sg_dma: bus physical mapped address of h/w link table
12825 + * @drv_req: driver-specific request structure
12826 + * @sgt: the h/w link table
12827 + */
12828 +struct tls_edesc {
12829 +       int src_nents;
12830 +       int dst_nents;
12831 +       dma_addr_t iv_dma;
12832 +       int qm_sg_bytes;
12833 +       dma_addr_t qm_sg_dma;
12834 +       struct scatterlist tmp[2];
12835 +       struct scatterlist *dst;
12836 +       struct caam_drv_req drv_req;
12837 +       struct qm_sg_entry sgt[0];
12838 +};
12839 +
12840 +/*
12841 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
12842 + * @src_nents: number of segments in input scatterlist
12843 + * @dst_nents: number of segments in output scatterlist
12844 + * @iv_dma: dma address of iv for checking continuity and link table
12845 + * @qm_sg_bytes: length of dma mapped h/w link table
12846 + * @qm_sg_dma: bus physical mapped address of h/w link table
12847 + * @drv_req: driver-specific request structure
12848 + * @sgt: the h/w link table
12849 + */
12850 +struct ablkcipher_edesc {
12851 +       int src_nents;
12852 +       int dst_nents;
12853 +       dma_addr_t iv_dma;
12854 +       int qm_sg_bytes;
12855 +       dma_addr_t qm_sg_dma;
12856 +       struct caam_drv_req drv_req;
12857 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
12858 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
12859 +        sizeof(struct qm_sg_entry))
12860 +       struct qm_sg_entry sgt[0];
12861 +};
12862 +
12863 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
12864 +                                       enum optype type)
12865 +{
12866 +       /*
12867 +        * This function is called on the fast path with values of 'type'
12868 +        * known at compile time. Invalid arguments are not expected and
12869 +        * thus no checks are made.
12870 +        */
12871 +       struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
12872 +       u32 *desc;
12873 +
12874 +       if (unlikely(!drv_ctx)) {
12875 +               spin_lock(&ctx->lock);
12876 +
12877 +               /* Read again to check if some other core initialized drv_ctx */
12878 +               drv_ctx = ctx->drv_ctx[type];
12879 +               if (!drv_ctx) {
12880 +                       int cpu;
12881 +
12882 +                       if (type == ENCRYPT)
12883 +                               desc = ctx->sh_desc_enc;
12884 +                       else if (type == DECRYPT)
12885 +                               desc = ctx->sh_desc_dec;
12886 +                       else /* (type == GIVENCRYPT) */
12887 +                               desc = ctx->sh_desc_givenc;
12888 +
12889 +                       cpu = smp_processor_id();
12890 +                       drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
12891 +                       if (likely(!IS_ERR_OR_NULL(drv_ctx)))
12892 +                               drv_ctx->op_type = type;
12893 +
12894 +                       ctx->drv_ctx[type] = drv_ctx;
12895 +               }
12896 +
12897 +               spin_unlock(&ctx->lock);
12898 +       }
12899 +
12900 +       return drv_ctx;
12901 +}
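+
+/*
+ * get_drv_ctx() uses double-checked locking: the unlocked read lets
+ * the common case skip the spinlock entirely, while the second read
+ * under ctx->lock ensures only one driver context is created per
+ * (tfm, operation type) pair.  Note that an error result from
+ * caam_drv_ctx_init() is cached as well, so later requests fail fast
+ * instead of retrying the initialization.
+ */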
12902 +
12903 +static void caam_unmap(struct device *dev, struct scatterlist *src,
12904 +                      struct scatterlist *dst, int src_nents,
12905 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
12906 +                      enum optype op_type, dma_addr_t qm_sg_dma,
12907 +                      int qm_sg_bytes)
12908 +{
12909 +       if (dst != src) {
12910 +               if (src_nents)
12911 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
12912 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
12913 +       } else {
12914 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
12915 +       }
12916 +
12917 +       if (iv_dma)
12918 +               dma_unmap_single(dev, iv_dma, ivsize,
12919 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
12920 +                                                        DMA_TO_DEVICE);
12921 +       if (qm_sg_bytes)
12922 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
12923 +}
12924 +
12925 +static void aead_unmap(struct device *dev,
12926 +                      struct aead_edesc *edesc,
12927 +                      struct aead_request *req)
12928 +{
12929 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
12930 +       int ivsize = crypto_aead_ivsize(aead);
12931 +
12932 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12933 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12934 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
12935 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
12936 +}
12937 +
12938 +static void tls_unmap(struct device *dev,
12939 +                     struct tls_edesc *edesc,
12940 +                     struct aead_request *req)
12941 +{
12942 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
12943 +       int ivsize = crypto_aead_ivsize(aead);
12944 +
12945 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
12946 +                  edesc->dst_nents, edesc->iv_dma, ivsize,
12947 +                  edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
12948 +                  edesc->qm_sg_bytes);
12949 +}
12950 +
12951 +static void ablkcipher_unmap(struct device *dev,
12952 +                            struct ablkcipher_edesc *edesc,
12953 +                            struct ablkcipher_request *req)
12954 +{
12955 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
12956 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12957 +
12958 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12959 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12960 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
12961 +}
12962 +
12963 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
12964 +{
12965 +       struct device *qidev;
12966 +       struct aead_edesc *edesc;
12967 +       struct aead_request *aead_req = drv_req->app_ctx;
12968 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
12969 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
12970 +       int ecode = 0;
12971 +
12972 +       qidev = caam_ctx->qidev;
12973 +
12974 +       if (unlikely(status)) {
12975 +               caam_jr_strstatus(qidev, status);
12976 +               ecode = -EIO;
12977 +       }
12978 +
12979 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
12980 +       aead_unmap(qidev, edesc, aead_req);
12981 +
12982 +       aead_request_complete(aead_req, ecode);
12983 +       qi_cache_free(edesc);
12984 +}
12985 +
12986 +/*
12987 + * allocate and map the aead extended descriptor
12988 + */
12989 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
12990 +                                          bool encrypt)
12991 +{
12992 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
12993 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12994 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12995 +                                                typeof(*alg), aead);
12996 +       struct device *qidev = ctx->qidev;
12997 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
12998 +                      GFP_KERNEL : GFP_ATOMIC;
12999 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13000 +       struct aead_edesc *edesc;
13001 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13002 +       int ivsize = 0;
13003 +       unsigned int authsize = ctx->authsize;
13004 +       int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13005 +       int in_len, out_len;
13006 +       struct qm_sg_entry *sg_table, *fd_sgt;
13007 +       struct caam_drv_ctx *drv_ctx;
13008 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13009 +
13010 +       drv_ctx = get_drv_ctx(ctx, op_type);
13011 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13012 +               return (struct aead_edesc *)drv_ctx;
13013 +
13014 +       /* allocate space for base edesc and hw desc commands, link tables */
13015 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13016 +       if (unlikely(!edesc)) {
13017 +               dev_err(qidev, "could not allocate extended descriptor\n");
13018 +               return ERR_PTR(-ENOMEM);
13019 +       }
13020 +
13021 +       if (likely(req->src == req->dst)) {
13022 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13023 +                                            req->cryptlen +
13024 +                                               (encrypt ? authsize : 0));
13025 +               if (unlikely(src_nents < 0)) {
13026 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13027 +                               req->assoclen + req->cryptlen +
13028 +                               (encrypt ? authsize : 0));
13029 +                       qi_cache_free(edesc);
13030 +                       return ERR_PTR(src_nents);
13031 +               }
13032 +
13033 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13034 +                                             DMA_BIDIRECTIONAL);
13035 +               if (unlikely(!mapped_src_nents)) {
13036 +                       dev_err(qidev, "unable to map source\n");
13037 +                       qi_cache_free(edesc);
13038 +                       return ERR_PTR(-ENOMEM);
13039 +               }
13040 +       } else {
13041 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13042 +                                            req->cryptlen);
13043 +               if (unlikely(src_nents < 0)) {
13044 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13045 +                               req->assoclen + req->cryptlen);
13046 +                       qi_cache_free(edesc);
13047 +                       return ERR_PTR(src_nents);
13048 +               }
13049 +
13050 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13051 +                                            req->cryptlen +
13052 +                                            (encrypt ? authsize :
13053 +                                                       (-authsize)));
13054 +               if (unlikely(dst_nents < 0)) {
13055 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13056 +                               req->assoclen + req->cryptlen +
13057 +                               (encrypt ? authsize : (-authsize)));
13058 +                       qi_cache_free(edesc);
13059 +                       return ERR_PTR(dst_nents);
13060 +               }
13061 +
13062 +               if (src_nents) {
13063 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13064 +                                                     src_nents, DMA_TO_DEVICE);
13065 +                       if (unlikely(!mapped_src_nents)) {
13066 +                               dev_err(qidev, "unable to map source\n");
13067 +                               qi_cache_free(edesc);
13068 +                               return ERR_PTR(-ENOMEM);
13069 +                       }
13070 +               } else {
13071 +                       mapped_src_nents = 0;
13072 +               }
13073 +
13074 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13075 +                                             DMA_FROM_DEVICE);
13076 +               if (unlikely(!mapped_dst_nents)) {
13077 +                       dev_err(qidev, "unable to map destination\n");
13078 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13079 +                       qi_cache_free(edesc);
13080 +                       return ERR_PTR(-ENOMEM);
13081 +               }
13082 +       }
13083 +
13084 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13085 +               ivsize = crypto_aead_ivsize(aead);
13086 +               iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13087 +               if (dma_mapping_error(qidev, iv_dma)) {
13088 +                       dev_err(qidev, "unable to map IV\n");
13089 +                       caam_unmap(qidev, req->src, req->dst, src_nents,
13090 +                                  dst_nents, 0, 0, op_type, 0, 0);
13091 +                       qi_cache_free(edesc);
13092 +                       return ERR_PTR(-ENOMEM);
13093 +               }
13094 +       }
13095 +
13096 +       /*
13097 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13098 +        * Input is not contiguous.
13099 +        */
13100 +       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13101 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13102 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13103 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13104 +                       qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13105 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13106 +                          iv_dma, ivsize, op_type, 0, 0);
13107 +               qi_cache_free(edesc);
13108 +               return ERR_PTR(-ENOMEM);
13109 +       }
13110 +       sg_table = &edesc->sgt[0];
13111 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13112 +
13113 +       edesc->src_nents = src_nents;
13114 +       edesc->dst_nents = dst_nents;
13115 +       edesc->iv_dma = iv_dma;
13116 +       edesc->drv_req.app_ctx = req;
13117 +       edesc->drv_req.cbk = aead_done;
13118 +       edesc->drv_req.drv_ctx = drv_ctx;
13119 +
13120 +       edesc->assoclen = cpu_to_caam32(req->assoclen);
13121 +       edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13122 +                                            DMA_TO_DEVICE);
13123 +       if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13124 +               dev_err(qidev, "unable to map assoclen\n");
13125 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13126 +                          iv_dma, ivsize, op_type, 0, 0);
13127 +               qi_cache_free(edesc);
13128 +               return ERR_PTR(-ENOMEM);
13129 +       }
13130 +
13131 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13132 +       qm_sg_index++;
13133 +       if (ivsize) {
13134 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13135 +               qm_sg_index++;
13136 +       }
13137 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13138 +       qm_sg_index += mapped_src_nents;
13139 +
13140 +       if (mapped_dst_nents > 1)
13141 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13142 +                                qm_sg_index, 0);
13143 +
13144 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13145 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13146 +               dev_err(qidev, "unable to map S/G table\n");
13147 +               dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13148 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13149 +                          iv_dma, ivsize, op_type, 0, 0);
13150 +               qi_cache_free(edesc);
13151 +               return ERR_PTR(-ENOMEM);
13152 +       }
13153 +
13154 +       edesc->qm_sg_dma = qm_sg_dma;
13155 +       edesc->qm_sg_bytes = qm_sg_bytes;
13156 +
13157 +       out_len = req->assoclen + req->cryptlen +
13158 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
13159 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13160 +
13161 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13162 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13163 +
13164 +       if (req->dst == req->src) {
13165 +               if (mapped_src_nents == 1)
13166 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13167 +                                        out_len, 0);
13168 +               else
13169 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13170 +                                            (1 + !!ivsize) * sizeof(*sg_table),
13171 +                                            out_len, 0);
13172 +       } else if (mapped_dst_nents == 1) {
13173 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13174 +                                0);
13175 +       } else {
13176 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13177 +                                    qm_sg_index, out_len, 0);
13178 +       }
13179 +
13180 +       return edesc;
13181 +}
13182 +
13183 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13184 +{
13185 +       struct aead_edesc *edesc;
13186 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13187 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13188 +       int ret;
13189 +
13190 +       if (unlikely(caam_congested))
13191 +               return -EAGAIN;
13192 +
13193 +       /* allocate extended descriptor */
13194 +       edesc = aead_edesc_alloc(req, encrypt);
13195 +       if (IS_ERR_OR_NULL(edesc))
13196 +               return PTR_ERR(edesc);
13197 +
13198 +       /* Create and submit job descriptor */
13199 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13200 +       if (!ret) {
13201 +               ret = -EINPROGRESS;
13202 +       } else {
13203 +               aead_unmap(ctx->qidev, edesc, req);
13204 +               qi_cache_free(edesc);
13205 +       }
13206 +
13207 +       return ret;
13208 +}
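+
+/*
+ * caam_congested is the QI backend's congestion notification; bailing
+ * out with -EAGAIN before building the extended descriptor avoids
+ * piling work onto frame queues that are already backlogged and leaves
+ * the retry decision to the caller.
+ */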
13209 +
13210 +static int aead_encrypt(struct aead_request *req)
13211 +{
13212 +       return aead_crypt(req, true);
13213 +}
13214 +
13215 +static int aead_decrypt(struct aead_request *req)
13216 +{
13217 +       return aead_crypt(req, false);
13218 +}
13219 +
13220 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13221 +{
13222 +       struct device *qidev;
13223 +       struct tls_edesc *edesc;
13224 +       struct aead_request *aead_req = drv_req->app_ctx;
13225 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13226 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13227 +       int ecode = 0;
13228 +
13229 +       qidev = caam_ctx->qidev;
13230 +
13231 +       if (unlikely(status)) {
13232 +               caam_jr_strstatus(qidev, status);
13233 +               ecode = -EIO;
13234 +       }
13235 +
13236 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13237 +       tls_unmap(qidev, edesc, aead_req);
13238 +
13239 +       aead_request_complete(aead_req, ecode);
13240 +       qi_cache_free(edesc);
13241 +}
13242 +
13243 +/*
13244 + * allocate and map the tls extended descriptor
13245 + */
13246 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13247 +{
13248 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13249 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13250 +       unsigned int blocksize = crypto_aead_blocksize(aead);
13251 +       unsigned int padsize, authsize;
13252 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13253 +                                                typeof(*alg), aead);
13254 +       struct device *qidev = ctx->qidev;
13255 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13256 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13257 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13258 +       struct tls_edesc *edesc;
13259 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13260 +       int ivsize = 0;
13261 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13262 +       int in_len, out_len;
13263 +       struct qm_sg_entry *sg_table, *fd_sgt;
13264 +       struct caam_drv_ctx *drv_ctx;
13265 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13266 +       struct scatterlist *dst;
13267 +
13268 +       if (encrypt) {
13269 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13270 +                                       blocksize);
13271 +               authsize = ctx->authsize + padsize;
13272 +       } else {
13273 +               authsize = ctx->authsize;
13274 +       }
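+
+       /*
+        * Worked example for the padding above: with AES-CBC
+        * (blocksize = 16), cryptlen = 100 and a 20-byte HMAC-SHA1
+        * digest, (100 + 20) % 16 = 8, so padsize = 8 and encryption
+        * must produce authsize = 28 trailer bytes (MAC plus padding).
+        */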
13275 +
13276 +       drv_ctx = get_drv_ctx(ctx, op_type);
13277 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13278 +               return (struct tls_edesc *)drv_ctx;
13279 +
13280 +       /* allocate space for base edesc and hw desc commands, link tables */
13281 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13282 +       if (unlikely(!edesc)) {
13283 +               dev_err(qidev, "could not allocate extended descriptor\n");
13284 +               return ERR_PTR(-ENOMEM);
13285 +       }
13286 +
13287 +       if (likely(req->src == req->dst)) {
13288 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13289 +                                            req->cryptlen +
13290 +                                            (encrypt ? authsize : 0));
13291 +               if (unlikely(src_nents < 0)) {
13292 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13293 +                               req->assoclen + req->cryptlen +
13294 +                               (encrypt ? authsize : 0));
13295 +                       qi_cache_free(edesc);
13296 +                       return ERR_PTR(src_nents);
13297 +               }
13298 +
13299 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13300 +                                             DMA_BIDIRECTIONAL);
13301 +               if (unlikely(!mapped_src_nents)) {
13302 +                       dev_err(qidev, "unable to map source\n");
13303 +                       qi_cache_free(edesc);
13304 +                       return ERR_PTR(-ENOMEM);
13305 +               }
13306 +               dst = req->dst;
13307 +       } else {
13308 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13309 +                                            req->cryptlen);
13310 +               if (unlikely(src_nents < 0)) {
13311 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13312 +                               req->assoclen + req->cryptlen);
13313 +                       qi_cache_free(edesc);
13314 +                       return ERR_PTR(src_nents);
13315 +               }
13316 +
13317 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13318 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
13319 +                                            (encrypt ? authsize : 0));
13320 +               if (unlikely(dst_nents < 0)) {
13321 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13322 +                               req->cryptlen +
13323 +                               (encrypt ? authsize : 0));
13324 +                       qi_cache_free(edesc);
13325 +                       return ERR_PTR(dst_nents);
13326 +               }
13327 +
13328 +               if (src_nents) {
13329 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13330 +                                                     src_nents, DMA_TO_DEVICE);
13331 +                       if (unlikely(!mapped_src_nents)) {
13332 +                               dev_err(qidev, "unable to map source\n");
13333 +                               qi_cache_free(edesc);
13334 +                               return ERR_PTR(-ENOMEM);
13335 +                       }
13336 +               } else {
13337 +                       mapped_src_nents = 0;
13338 +               }
13339 +
13340 +               mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13341 +                                             DMA_FROM_DEVICE);
13342 +               if (unlikely(!mapped_dst_nents)) {
13343 +                       dev_err(qidev, "unable to map destination\n");
13344 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13345 +                       qi_cache_free(edesc);
13346 +                       return ERR_PTR(-ENOMEM);
13347 +               }
13348 +       }
13349 +
13350 +       ivsize = crypto_aead_ivsize(aead);
13351 +       iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13352 +       if (dma_mapping_error(qidev, iv_dma)) {
13353 +               dev_err(qidev, "unable to map IV\n");
13354 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13355 +                          op_type, 0, 0);
13356 +               qi_cache_free(edesc);
13357 +               return ERR_PTR(-ENOMEM);
13358 +       }
13359 +
13360 +       /*
13361 +        * Create S/G table: IV, src, dst.
13362 +        * Input is not contiguous.
13363 +        */
13364 +       qm_sg_ents = 1 + mapped_src_nents +
13365 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13366 +       sg_table = &edesc->sgt[0];
13367 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13368 +
13369 +       edesc->src_nents = src_nents;
13370 +       edesc->dst_nents = dst_nents;
13371 +       edesc->dst = dst;
13372 +       edesc->iv_dma = iv_dma;
13373 +       edesc->drv_req.app_ctx = req;
13374 +       edesc->drv_req.cbk = tls_done;
13375 +       edesc->drv_req.drv_ctx = drv_ctx;
13376 +
13377 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13378 +       qm_sg_index = 1;
13379 +
13380 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13381 +       qm_sg_index += mapped_src_nents;
13382 +
13383 +       if (mapped_dst_nents > 1)
13384 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13385 +                                qm_sg_index, 0);
13386 +
13387 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13388 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13389 +               dev_err(qidev, "unable to map S/G table\n");
13390 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13391 +                          ivsize, op_type, 0, 0);
13392 +               qi_cache_free(edesc);
13393 +               return ERR_PTR(-ENOMEM);
13394 +       }
13395 +
13396 +       edesc->qm_sg_dma = qm_sg_dma;
13397 +       edesc->qm_sg_bytes = qm_sg_bytes;
13398 +
13399 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
13400 +       in_len = ivsize + req->assoclen + req->cryptlen;
13401 +
13402 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13403 +
13404 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13405 +
13406 +       if (req->dst == req->src)
13407 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13408 +                                   (sg_nents_for_len(req->src, req->assoclen) +
13409 +                                    1) * sizeof(*sg_table), out_len, 0);
13410 +       else if (mapped_dst_nents == 1)
13411 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13412 +       else
13413 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13414 +                                    qm_sg_index, out_len, 0);
13415 +
13416 +       return edesc;
13417 +}
13418 +
13419 +static int tls_crypt(struct aead_request *req, bool encrypt)
13420 +{
13421 +       struct tls_edesc *edesc;
13422 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13423 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13424 +       int ret;
13425 +
13426 +       if (unlikely(caam_congested))
13427 +               return -EAGAIN;
13428 +
13429 +       edesc = tls_edesc_alloc(req, encrypt);
13430 +       if (IS_ERR_OR_NULL(edesc))
13431 +               return PTR_ERR(edesc);
13432 +
13433 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13434 +       if (!ret) {
13435 +               ret = -EINPROGRESS;
13436 +       } else {
13437 +               tls_unmap(ctx->qidev, edesc, req);
13438 +               qi_cache_free(edesc);
13439 +       }
13440 +
13441 +       return ret;
13442 +}
13443 +
13444 +static int tls_encrypt(struct aead_request *req)
13445 +{
13446 +       return tls_crypt(req, true);
13447 +}
13448 +
13449 +static int tls_decrypt(struct aead_request *req)
13450 +{
13451 +       return tls_crypt(req, false);
13452 +}
13453 +
13454 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13455 +{
13456 +       struct ablkcipher_edesc *edesc;
13457 +       struct ablkcipher_request *req = drv_req->app_ctx;
13458 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13459 +       struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13460 +       struct device *qidev = caam_ctx->qidev;
13461 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13462 +
13463 +#ifdef DEBUG
13464 +       dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13465 +#endif
13466 +
13467 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13468 +
13469 +       if (status)
13470 +               caam_jr_strstatus(qidev, status);
13471 +
13472 +#ifdef DEBUG
13473 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
13474 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13475 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
13476 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
13477 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13478 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13479 +#endif
13480 +
13481 +       ablkcipher_unmap(qidev, edesc, req);
13482 +       qi_cache_free(edesc);
13483 +
13484 +       /*
13485 +        * The crypto API expects us to set the IV (req->info) to the last
13486 +        * ciphertext block. This is used e.g. by the CTS mode.
13487 +        */
13488 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13489 +                                ivsize, 0);
13490 +
13491 +       ablkcipher_request_complete(req, status);
13492 +}
13493 +
13494 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13495 +                                                      *req, bool encrypt)
13496 +{
13497 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13498 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13499 +       struct device *qidev = ctx->qidev;
13500 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13501 +                      GFP_KERNEL : GFP_ATOMIC;
13502 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13503 +       struct ablkcipher_edesc *edesc;
13504 +       dma_addr_t iv_dma;
13505 +       bool in_contig;
13506 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13507 +       int dst_sg_idx, qm_sg_ents;
13508 +       struct qm_sg_entry *sg_table, *fd_sgt;
13509 +       struct caam_drv_ctx *drv_ctx;
13510 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13511 +
13512 +       drv_ctx = get_drv_ctx(ctx, op_type);
13513 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13514 +               return (struct ablkcipher_edesc *)drv_ctx;
13515 +
13516 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13517 +       if (unlikely(src_nents < 0)) {
13518 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13519 +                       req->nbytes);
13520 +               return ERR_PTR(src_nents);
13521 +       }
13522 +
13523 +       if (unlikely(req->src != req->dst)) {
13524 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13525 +               if (unlikely(dst_nents < 0)) {
13526 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13527 +                               req->nbytes);
13528 +                       return ERR_PTR(dst_nents);
13529 +               }
13530 +
13531 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13532 +                                             DMA_TO_DEVICE);
13533 +               if (unlikely(!mapped_src_nents)) {
13534 +                       dev_err(qidev, "unable to map source\n");
13535 +                       return ERR_PTR(-ENOMEM);
13536 +               }
13537 +
13538 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13539 +                                             DMA_FROM_DEVICE);
13540 +               if (unlikely(!mapped_dst_nents)) {
13541 +                       dev_err(qidev, "unable to map destination\n");
13542 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13543 +                       return ERR_PTR(-ENOMEM);
13544 +               }
13545 +       } else {
13546 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13547 +                                             DMA_BIDIRECTIONAL);
13548 +               if (unlikely(!mapped_src_nents)) {
13549 +                       dev_err(qidev, "unable to map source\n");
13550 +                       return ERR_PTR(-ENOMEM);
13551 +               }
13552 +       }
13553 +
13554 +       iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13555 +       if (dma_mapping_error(qidev, iv_dma)) {
13556 +               dev_err(qidev, "unable to map IV\n");
13557 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13558 +                          0, 0, 0, 0);
13559 +               return ERR_PTR(-ENOMEM);
13560 +       }
13561 +
13562 +       if (mapped_src_nents == 1 &&
13563 +           iv_dma + ivsize == sg_dma_address(req->src)) {
13564 +               in_contig = true;
13565 +               qm_sg_ents = 0;
13566 +       } else {
13567 +               in_contig = false;
13568 +               qm_sg_ents = 1 + mapped_src_nents;
13569 +       }
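+
+       /*
+        * in_contig means the DMA-mapped IV happens to land immediately
+        * before a single-segment source buffer, so IV and payload can
+        * be handed to CAAM as one contiguous input and no input S/G
+        * entries are needed; otherwise one extra entry carries the IV
+        * ahead of the source segments.
+        */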
13570 +       dst_sg_idx = qm_sg_ents;
13571 +
13572 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13573 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13574 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13575 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13576 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13577 +                          iv_dma, ivsize, op_type, 0, 0);
13578 +               return ERR_PTR(-ENOMEM);
13579 +       }
13580 +
13581 +       /* allocate space for base edesc and link tables */
13582 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13583 +       if (unlikely(!edesc)) {
13584 +               dev_err(qidev, "could not allocate extended descriptor\n");
13585 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13586 +                          iv_dma, ivsize, op_type, 0, 0);
13587 +               return ERR_PTR(-ENOMEM);
13588 +       }
13589 +
13590 +       edesc->src_nents = src_nents;
13591 +       edesc->dst_nents = dst_nents;
13592 +       edesc->iv_dma = iv_dma;
13593 +       sg_table = &edesc->sgt[0];
13594 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13595 +       edesc->drv_req.app_ctx = req;
13596 +       edesc->drv_req.cbk = ablkcipher_done;
13597 +       edesc->drv_req.drv_ctx = drv_ctx;
13598 +
13599 +       if (!in_contig) {
13600 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13601 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13602 +       }
13603 +
13604 +       if (mapped_dst_nents > 1)
13605 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13606 +                                dst_sg_idx, 0);
13607 +
13608 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13609 +                                         DMA_TO_DEVICE);
13610 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13611 +               dev_err(qidev, "unable to map S/G table\n");
13612 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13613 +                          iv_dma, ivsize, op_type, 0, 0);
13614 +               qi_cache_free(edesc);
13615 +               return ERR_PTR(-ENOMEM);
13616 +       }
13617 +
13618 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13619 +
13620 +       if (!in_contig)
13621 +               dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13622 +                                         ivsize + req->nbytes, 0);
13623 +       else
13624 +               dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13625 +                                     0);
13626 +
13627 +       if (req->src == req->dst) {
13628 +               if (!in_contig)
13629 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13630 +                                            sizeof(*sg_table), req->nbytes, 0);
13631 +               else
13632 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13633 +                                        req->nbytes, 0);
13634 +       } else if (mapped_dst_nents > 1) {
13635 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13636 +                                    sizeof(*sg_table), req->nbytes, 0);
13637 +       } else {
13638 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13639 +                                req->nbytes, 0);
13640 +       }
13641 +
13642 +       return edesc;
13643 +}
13644 +
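+/*
+ * Allocate an extended descriptor for an IV-generating (givencrypt) request.
+ * The engine returns the generated IV in creq->giv together with the
+ * ciphertext.
+ */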
13645 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13646 +       struct skcipher_givcrypt_request *creq)
13647 +{
13648 +       struct ablkcipher_request *req = &creq->creq;
13649 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13650 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13651 +       struct device *qidev = ctx->qidev;
13652 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13653 +                      GFP_KERNEL : GFP_ATOMIC;
13654 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13655 +       struct ablkcipher_edesc *edesc;
13656 +       dma_addr_t iv_dma;
13657 +       bool out_contig;
13658 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13659 +       struct qm_sg_entry *sg_table, *fd_sgt;
13660 +       int dst_sg_idx, qm_sg_ents;
13661 +       struct caam_drv_ctx *drv_ctx;
13662 +
13663 +       drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13664 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13665 +               return (struct ablkcipher_edesc *)drv_ctx;
13666 +
13667 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13668 +       if (unlikely(src_nents < 0)) {
13669 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13670 +                       req->nbytes);
13671 +               return ERR_PTR(src_nents);
13672 +       }
13673 +
13674 +       if (unlikely(req->src != req->dst)) {
13675 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13676 +               if (unlikely(dst_nents < 0)) {
13677 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13678 +                               req->nbytes);
13679 +                       return ERR_PTR(dst_nents);
13680 +               }
13681 +
13682 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13683 +                                             DMA_TO_DEVICE);
13684 +               if (unlikely(!mapped_src_nents)) {
13685 +                       dev_err(qidev, "unable to map source\n");
13686 +                       return ERR_PTR(-ENOMEM);
13687 +               }
13688 +
13689 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13690 +                                             DMA_FROM_DEVICE);
13691 +               if (unlikely(!mapped_dst_nents)) {
13692 +                       dev_err(qidev, "unable to map destination\n");
13693 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13694 +                       return ERR_PTR(-ENOMEM);
13695 +               }
13696 +       } else {
13697 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13698 +                                             DMA_BIDIRECTIONAL);
13699 +               if (unlikely(!mapped_src_nents)) {
13700 +                       dev_err(qidev, "unable to map source\n");
13701 +                       return ERR_PTR(-ENOMEM);
13702 +               }
13703 +
13704 +               dst_nents = src_nents;
13705 +               mapped_dst_nents = src_nents;
13706 +       }
13707 +
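+       /*
+        * creq->giv receives the IV generated by the engine, hence the
+        * DMA_FROM_DEVICE mapping.
+        */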
13708 +       iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13709 +       if (dma_mapping_error(qidev, iv_dma)) {
13710 +               dev_err(qidev, "unable to map IV\n");
13711 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13712 +                          0, 0, 0, 0);
13713 +               return ERR_PTR(-ENOMEM);
13714 +       }
13715 +
13716 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13717 +       dst_sg_idx = qm_sg_ents;
13718 +       if (mapped_dst_nents == 1 &&
13719 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
13720 +               out_contig = true;
13721 +       } else {
13722 +               out_contig = false;
13723 +               qm_sg_ents += 1 + mapped_dst_nents;
13724 +       }
13725 +
13726 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13727 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13728 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13729 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13730 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13731 +               return ERR_PTR(-ENOMEM);
13732 +       }
13733 +
13734 +       /* allocate space for base edesc and link tables */
13735 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13736 +       if (unlikely(!edesc)) {
13737 +               dev_err(qidev, "could not allocate extended descriptor\n");
13738 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13739 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13740 +               return ERR_PTR(-ENOMEM);
13741 +       }
13742 +
13743 +       edesc->src_nents = src_nents;
13744 +       edesc->dst_nents = dst_nents;
13745 +       edesc->iv_dma = iv_dma;
13746 +       sg_table = &edesc->sgt[0];
13747 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13748 +       edesc->drv_req.app_ctx = req;
13749 +       edesc->drv_req.cbk = ablkcipher_done;
13750 +       edesc->drv_req.drv_ctx = drv_ctx;
13751 +
13752 +       if (mapped_src_nents > 1)
13753 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13754 +
13755 +       if (!out_contig) {
13756 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13757 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13758 +                                dst_sg_idx + 1, 0);
13759 +       }
13760 +
13761 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13762 +                                         DMA_TO_DEVICE);
13763 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13764 +               dev_err(qidev, "unable to map S/G table\n");
13765 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13766 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13767 +               qi_cache_free(edesc);
13768 +               return ERR_PTR(-ENOMEM);
13769 +       }
13770 +
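+       /*
+        * The input frame covers only the source data; the output frame
+        * covers the generated IV followed by the ciphertext.
+        */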
13771 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13772 +
13773 +       if (mapped_src_nents > 1)
13774 +               dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13775 +                                    0);
13776 +       else
13777 +               dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13778 +                                req->nbytes, 0);
13779 +
13780 +       if (!out_contig)
13781 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13782 +                                    sizeof(*sg_table), ivsize + req->nbytes,
13783 +                                    0);
13784 +       else
13785 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13786 +                                ivsize + req->nbytes, 0);
13787 +
13788 +       return edesc;
13789 +}
13790 +
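+/*
+ * Common encrypt/decrypt path: back off with -EAGAIN while the QI backend
+ * is congested, otherwise enqueue the request and report -EINPROGRESS.
+ */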
13791 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13792 +{
13793 +       struct ablkcipher_edesc *edesc;
13794 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13795 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13796 +       int ret;
13797 +
13798 +       if (unlikely(caam_congested))
13799 +               return -EAGAIN;
13800 +
13801 +       /* allocate extended descriptor */
13802 +       edesc = ablkcipher_edesc_alloc(req, encrypt);
13803 +       if (IS_ERR(edesc))
13804 +               return PTR_ERR(edesc);
13805 +
13806 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13807 +       if (!ret) {
13808 +               ret = -EINPROGRESS;
13809 +       } else {
13810 +               ablkcipher_unmap(ctx->qidev, edesc, req);
13811 +               qi_cache_free(edesc);
13812 +       }
13813 +
13814 +       return ret;
13815 +}
13816 +
13817 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
13818 +{
13819 +       return ablkcipher_crypt(req, true);
13820 +}
13821 +
13822 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
13823 +{
13824 +       return ablkcipher_crypt(req, false);
13825 +}
13826 +
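+/* Same flow as ablkcipher_crypt(), but using the GIVENCRYPT descriptor. */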
13827 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
13828 +{
13829 +       struct ablkcipher_request *req = &creq->creq;
13830 +       struct ablkcipher_edesc *edesc;
13831 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13832 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13833 +       int ret;
13834 +
13835 +       if (unlikely(caam_congested))
13836 +               return -EAGAIN;
13837 +
13838 +       /* allocate extended descriptor */
13839 +       edesc = ablkcipher_giv_edesc_alloc(creq);
13840 +       if (IS_ERR(edesc))
13841 +               return PTR_ERR(edesc);
13842 +
13843 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13844 +       if (!ret) {
13845 +               ret = -EINPROGRESS;
13846 +       } else {
13847 +               ablkcipher_unmap(ctx->qidev, edesc, req);
13848 +               qi_cache_free(edesc);
13849 +       }
13850 +
13851 +       return ret;
13852 +}
13853 +
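+/* Template describing one ablkcipher algorithm, as listed in driver_algs[]. */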
13854 +#define template_ablkcipher    template_u.ablkcipher
13855 +struct caam_alg_template {
13856 +       char name[CRYPTO_MAX_ALG_NAME];
13857 +       char driver_name[CRYPTO_MAX_ALG_NAME];
13858 +       unsigned int blocksize;
13859 +       u32 type;
13860 +       union {
13861 +               struct ablkcipher_alg ablkcipher;
13862 +       } template_u;
13863 +       u32 class1_alg_type;
13864 +       u32 class2_alg_type;
13865 +};
13866 +
13867 +static struct caam_alg_template driver_algs[] = {
13868 +       /* ablkcipher descriptor */
13869 +       {
13870 +               .name = "cbc(aes)",
13871 +               .driver_name = "cbc-aes-caam-qi",
13872 +               .blocksize = AES_BLOCK_SIZE,
13873 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13874 +               .template_ablkcipher = {
13875 +                       .setkey = ablkcipher_setkey,
13876 +                       .encrypt = ablkcipher_encrypt,
13877 +                       .decrypt = ablkcipher_decrypt,
13878 +                       .givencrypt = ablkcipher_givencrypt,
13879 +                       .geniv = "<built-in>",
13880 +                       .min_keysize = AES_MIN_KEY_SIZE,
13881 +                       .max_keysize = AES_MAX_KEY_SIZE,
13882 +                       .ivsize = AES_BLOCK_SIZE,
13883 +               },
13884 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13885 +       },
13886 +       {
13887 +               .name = "cbc(des3_ede)",
13888 +               .driver_name = "cbc-3des-caam-qi",
13889 +               .blocksize = DES3_EDE_BLOCK_SIZE,
13890 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13891 +               .template_ablkcipher = {
13892 +                       .setkey = ablkcipher_setkey,
13893 +                       .encrypt = ablkcipher_encrypt,
13894 +                       .decrypt = ablkcipher_decrypt,
13895 +                       .givencrypt = ablkcipher_givencrypt,
13896 +                       .geniv = "<built-in>",
13897 +                       .min_keysize = DES3_EDE_KEY_SIZE,
13898 +                       .max_keysize = DES3_EDE_KEY_SIZE,
13899 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
13900 +               },
13901 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
13902 +       },
13903 +       {
13904 +               .name = "cbc(des)",
13905 +               .driver_name = "cbc-des-caam-qi",
13906 +               .blocksize = DES_BLOCK_SIZE,
13907 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13908 +               .template_ablkcipher = {
13909 +                       .setkey = ablkcipher_setkey,
13910 +                       .encrypt = ablkcipher_encrypt,
13911 +                       .decrypt = ablkcipher_decrypt,
13912 +                       .givencrypt = ablkcipher_givencrypt,
13913 +                       .geniv = "<built-in>",
13914 +                       .min_keysize = DES_KEY_SIZE,
13915 +                       .max_keysize = DES_KEY_SIZE,
13916 +                       .ivsize = DES_BLOCK_SIZE,
13917 +               },
13918 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
13919 +       },
13920 +       {
13921 +               .name = "ctr(aes)",
13922 +               .driver_name = "ctr-aes-caam-qi",
13923 +               .blocksize = 1,
13924 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13925 +               .template_ablkcipher = {
13926 +                       .setkey = ablkcipher_setkey,
13927 +                       .encrypt = ablkcipher_encrypt,
13928 +                       .decrypt = ablkcipher_decrypt,
13929 +                       .geniv = "chainiv",
13930 +                       .min_keysize = AES_MIN_KEY_SIZE,
13931 +                       .max_keysize = AES_MAX_KEY_SIZE,
13932 +                       .ivsize = AES_BLOCK_SIZE,
13933 +               },
13934 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13935 +       },
13936 +       {
13937 +               .name = "rfc3686(ctr(aes))",
13938 +               .driver_name = "rfc3686-ctr-aes-caam-qi",
13939 +               .blocksize = 1,
13940 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13941 +               .template_ablkcipher = {
13942 +                       .setkey = ablkcipher_setkey,
13943 +                       .encrypt = ablkcipher_encrypt,
13944 +                       .decrypt = ablkcipher_decrypt,
13945 +                       .givencrypt = ablkcipher_givencrypt,
13946 +                       .geniv = "<built-in>",
13947 +                       .min_keysize = AES_MIN_KEY_SIZE +
13948 +                                      CTR_RFC3686_NONCE_SIZE,
13949 +                       .max_keysize = AES_MAX_KEY_SIZE +
13950 +                                      CTR_RFC3686_NONCE_SIZE,
13951 +                       .ivsize = CTR_RFC3686_IV_SIZE,
13952 +               },
13953 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13954 +       },
13955 +       {
13956 +               .name = "xts(aes)",
13957 +               .driver_name = "xts-aes-caam-qi",
13958 +               .blocksize = AES_BLOCK_SIZE,
13959 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13960 +               .template_ablkcipher = {
13961 +                       .setkey = xts_ablkcipher_setkey,
13962 +                       .encrypt = ablkcipher_encrypt,
13963 +                       .decrypt = ablkcipher_decrypt,
13964 +                       .geniv = "eseqiv",
13965 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
13966 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
13967 +                       .ivsize = AES_BLOCK_SIZE,
13968 +               },
13969 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
13970 +       },
13971 +};
13972 +
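+/*
+ * Each entry pairs a class 1 (cipher) and a class 2 (authentication)
+ * algorithm; the echainiv() variants additionally set .geniv.
+ */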
13973 +static struct caam_aead_alg driver_aeads[] = {
13974 +       /* single-pass ipsec_esp descriptor */
13975 +       {
13976 +               .aead = {
13977 +                       .base = {
13978 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
13979 +                               .cra_driver_name = "authenc-hmac-md5-"
13980 +                                                  "cbc-aes-caam-qi",
13981 +                               .cra_blocksize = AES_BLOCK_SIZE,
13982 +                       },
13983 +                       .setkey = aead_setkey,
13984 +                       .setauthsize = aead_setauthsize,
13985 +                       .encrypt = aead_encrypt,
13986 +                       .decrypt = aead_decrypt,
13987 +                       .ivsize = AES_BLOCK_SIZE,
13988 +                       .maxauthsize = MD5_DIGEST_SIZE,
13989 +               },
13990 +               .caam = {
13991 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13992 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
13993 +                                          OP_ALG_AAI_HMAC_PRECOMP,
13994 +               }
13995 +       },
13996 +       {
13997 +               .aead = {
13998 +                       .base = {
13999 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14000 +                                           "cbc(aes)))",
14001 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14002 +                                                  "cbc-aes-caam-qi",
14003 +                               .cra_blocksize = AES_BLOCK_SIZE,
14004 +                       },
14005 +                       .setkey = aead_setkey,
14006 +                       .setauthsize = aead_setauthsize,
14007 +                       .encrypt = aead_encrypt,
14008 +                       .decrypt = aead_decrypt,
14009 +                       .ivsize = AES_BLOCK_SIZE,
14010 +                       .maxauthsize = MD5_DIGEST_SIZE,
14011 +               },
14012 +               .caam = {
14013 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14014 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14015 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14016 +                       .geniv = true,
14017 +               }
14018 +       },
14019 +       {
14020 +               .aead = {
14021 +                       .base = {
14022 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
14023 +                               .cra_driver_name = "authenc-hmac-sha1-"
14024 +                                                  "cbc-aes-caam-qi",
14025 +                               .cra_blocksize = AES_BLOCK_SIZE,
14026 +                       },
14027 +                       .setkey = aead_setkey,
14028 +                       .setauthsize = aead_setauthsize,
14029 +                       .encrypt = aead_encrypt,
14030 +                       .decrypt = aead_decrypt,
14031 +                       .ivsize = AES_BLOCK_SIZE,
14032 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14033 +               },
14034 +               .caam = {
14035 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14036 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14037 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14038 +               }
14039 +       },
14040 +       {
14041 +               .aead = {
14042 +                       .base = {
14043 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14044 +                                           "cbc(aes)))",
14045 +                               .cra_driver_name = "echainiv-authenc-"
14046 +                                                  "hmac-sha1-cbc-aes-caam-qi",
14047 +                               .cra_blocksize = AES_BLOCK_SIZE,
14048 +                       },
14049 +                       .setkey = aead_setkey,
14050 +                       .setauthsize = aead_setauthsize,
14051 +                       .encrypt = aead_encrypt,
14052 +                       .decrypt = aead_decrypt,
14053 +                       .ivsize = AES_BLOCK_SIZE,
14054 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14055 +               },
14056 +               .caam = {
14057 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14058 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14059 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14060 +                       .geniv = true,
14061 +               },
14062 +       },
14063 +       {
14064 +               .aead = {
14065 +                       .base = {
14066 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
14067 +                               .cra_driver_name = "authenc-hmac-sha224-"
14068 +                                                  "cbc-aes-caam-qi",
14069 +                               .cra_blocksize = AES_BLOCK_SIZE,
14070 +                       },
14071 +                       .setkey = aead_setkey,
14072 +                       .setauthsize = aead_setauthsize,
14073 +                       .encrypt = aead_encrypt,
14074 +                       .decrypt = aead_decrypt,
14075 +                       .ivsize = AES_BLOCK_SIZE,
14076 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14077 +               },
14078 +               .caam = {
14079 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14080 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14081 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14082 +               }
14083 +       },
14084 +       {
14085 +               .aead = {
14086 +                       .base = {
14087 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14088 +                                           "cbc(aes)))",
14089 +                               .cra_driver_name = "echainiv-authenc-"
14090 +                                                  "hmac-sha224-cbc-aes-caam-qi",
14091 +                               .cra_blocksize = AES_BLOCK_SIZE,
14092 +                       },
14093 +                       .setkey = aead_setkey,
14094 +                       .setauthsize = aead_setauthsize,
14095 +                       .encrypt = aead_encrypt,
14096 +                       .decrypt = aead_decrypt,
14097 +                       .ivsize = AES_BLOCK_SIZE,
14098 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14099 +               },
14100 +               .caam = {
14101 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14102 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14103 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14104 +                       .geniv = true,
14105 +               }
14106 +       },
14107 +       {
14108 +               .aead = {
14109 +                       .base = {
14110 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
14111 +                               .cra_driver_name = "authenc-hmac-sha256-"
14112 +                                                  "cbc-aes-caam-qi",
14113 +                               .cra_blocksize = AES_BLOCK_SIZE,
14114 +                       },
14115 +                       .setkey = aead_setkey,
14116 +                       .setauthsize = aead_setauthsize,
14117 +                       .encrypt = aead_encrypt,
14118 +                       .decrypt = aead_decrypt,
14119 +                       .ivsize = AES_BLOCK_SIZE,
14120 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14121 +               },
14122 +               .caam = {
14123 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14124 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14125 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14126 +               }
14127 +       },
14128 +       {
14129 +               .aead = {
14130 +                       .base = {
14131 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14132 +                                           "cbc(aes)))",
14133 +                               .cra_driver_name = "echainiv-authenc-"
14134 +                                                  "hmac-sha256-cbc-aes-"
14135 +                                                  "caam-qi",
14136 +                               .cra_blocksize = AES_BLOCK_SIZE,
14137 +                       },
14138 +                       .setkey = aead_setkey,
14139 +                       .setauthsize = aead_setauthsize,
14140 +                       .encrypt = aead_encrypt,
14141 +                       .decrypt = aead_decrypt,
14142 +                       .ivsize = AES_BLOCK_SIZE,
14143 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14144 +               },
14145 +               .caam = {
14146 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14147 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14148 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14149 +                       .geniv = true,
14150 +               }
14151 +       },
14152 +       {
14153 +               .aead = {
14154 +                       .base = {
14155 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
14156 +                               .cra_driver_name = "authenc-hmac-sha384-"
14157 +                                                  "cbc-aes-caam-qi",
14158 +                               .cra_blocksize = AES_BLOCK_SIZE,
14159 +                       },
14160 +                       .setkey = aead_setkey,
14161 +                       .setauthsize = aead_setauthsize,
14162 +                       .encrypt = aead_encrypt,
14163 +                       .decrypt = aead_decrypt,
14164 +                       .ivsize = AES_BLOCK_SIZE,
14165 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14166 +               },
14167 +               .caam = {
14168 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14169 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14170 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14171 +               }
14172 +       },
14173 +       {
14174 +               .aead = {
14175 +                       .base = {
14176 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14177 +                                           "cbc(aes)))",
14178 +                               .cra_driver_name = "echainiv-authenc-"
14179 +                                                  "hmac-sha384-cbc-aes-"
14180 +                                                  "caam-qi",
14181 +                               .cra_blocksize = AES_BLOCK_SIZE,
14182 +                       },
14183 +                       .setkey = aead_setkey,
14184 +                       .setauthsize = aead_setauthsize,
14185 +                       .encrypt = aead_encrypt,
14186 +                       .decrypt = aead_decrypt,
14187 +                       .ivsize = AES_BLOCK_SIZE,
14188 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14189 +               },
14190 +               .caam = {
14191 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14192 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14193 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14194 +                       .geniv = true,
14195 +               }
14196 +       },
14197 +       {
14198 +               .aead = {
14199 +                       .base = {
14200 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
14201 +                               .cra_driver_name = "authenc-hmac-sha512-"
14202 +                                                  "cbc-aes-caam-qi",
14203 +                               .cra_blocksize = AES_BLOCK_SIZE,
14204 +                       },
14205 +                       .setkey = aead_setkey,
14206 +                       .setauthsize = aead_setauthsize,
14207 +                       .encrypt = aead_encrypt,
14208 +                       .decrypt = aead_decrypt,
14209 +                       .ivsize = AES_BLOCK_SIZE,
14210 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14211 +               },
14212 +               .caam = {
14213 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14214 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14215 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14216 +               }
14217 +       },
14218 +       {
14219 +               .aead = {
14220 +                       .base = {
14221 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14222 +                                           "cbc(aes)))",
14223 +                               .cra_driver_name = "echainiv-authenc-"
14224 +                                                  "hmac-sha512-cbc-aes-"
14225 +                                                  "caam-qi",
14226 +                               .cra_blocksize = AES_BLOCK_SIZE,
14227 +                       },
14228 +                       .setkey = aead_setkey,
14229 +                       .setauthsize = aead_setauthsize,
14230 +                       .encrypt = aead_encrypt,
14231 +                       .decrypt = aead_decrypt,
14232 +                       .ivsize = AES_BLOCK_SIZE,
14233 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14234 +               },
14235 +               .caam = {
14236 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14237 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14238 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14239 +                       .geniv = true,
14240 +               }
14241 +       },
14242 +       {
14243 +               .aead = {
14244 +                       .base = {
14245 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14246 +                               .cra_driver_name = "authenc-hmac-md5-"
14247 +                                                  "cbc-des3_ede-caam-qi",
14248 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14249 +                       },
14250 +                       .setkey = aead_setkey,
14251 +                       .setauthsize = aead_setauthsize,
14252 +                       .encrypt = aead_encrypt,
14253 +                       .decrypt = aead_decrypt,
14254 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14255 +                       .maxauthsize = MD5_DIGEST_SIZE,
14256 +               },
14257 +               .caam = {
14258 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14259 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14260 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14261 +               }
14262 +       },
14263 +       {
14264 +               .aead = {
14265 +                       .base = {
14266 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14267 +                                           "cbc(des3_ede)))",
14268 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14269 +                                                  "cbc-des3_ede-caam-qi",
14270 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14271 +                       },
14272 +                       .setkey = aead_setkey,
14273 +                       .setauthsize = aead_setauthsize,
14274 +                       .encrypt = aead_encrypt,
14275 +                       .decrypt = aead_decrypt,
14276 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14277 +                       .maxauthsize = MD5_DIGEST_SIZE,
14278 +               },
14279 +               .caam = {
14280 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14281 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14282 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14283 +                       .geniv = true,
14284 +               }
14285 +       },
14286 +       {
14287 +               .aead = {
14288 +                       .base = {
14289 +                               .cra_name = "authenc(hmac(sha1),"
14290 +                                           "cbc(des3_ede))",
14291 +                               .cra_driver_name = "authenc-hmac-sha1-"
14292 +                                                  "cbc-des3_ede-caam-qi",
14293 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14294 +                       },
14295 +                       .setkey = aead_setkey,
14296 +                       .setauthsize = aead_setauthsize,
14297 +                       .encrypt = aead_encrypt,
14298 +                       .decrypt = aead_decrypt,
14299 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14300 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14301 +               },
14302 +               .caam = {
14303 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14304 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14305 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14306 +               },
14307 +       },
14308 +       {
14309 +               .aead = {
14310 +                       .base = {
14311 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14312 +                                           "cbc(des3_ede)))",
14313 +                               .cra_driver_name = "echainiv-authenc-"
14314 +                                                  "hmac-sha1-"
14315 +                                                  "cbc-des3_ede-caam-qi",
14316 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14317 +                       },
14318 +                       .setkey = aead_setkey,
14319 +                       .setauthsize = aead_setauthsize,
14320 +                       .encrypt = aead_encrypt,
14321 +                       .decrypt = aead_decrypt,
14322 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14323 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14324 +               },
14325 +               .caam = {
14326 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14327 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14328 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14329 +                       .geniv = true,
14330 +               }
14331 +       },
14332 +       {
14333 +               .aead = {
14334 +                       .base = {
14335 +                               .cra_name = "authenc(hmac(sha224),"
14336 +                                           "cbc(des3_ede))",
14337 +                               .cra_driver_name = "authenc-hmac-sha224-"
14338 +                                                  "cbc-des3_ede-caam-qi",
14339 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14340 +                       },
14341 +                       .setkey = aead_setkey,
14342 +                       .setauthsize = aead_setauthsize,
14343 +                       .encrypt = aead_encrypt,
14344 +                       .decrypt = aead_decrypt,
14345 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14346 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14347 +               },
14348 +               .caam = {
14349 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14350 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14351 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14352 +               },
14353 +       },
14354 +       {
14355 +               .aead = {
14356 +                       .base = {
14357 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14358 +                                           "cbc(des3_ede)))",
14359 +                               .cra_driver_name = "echainiv-authenc-"
14360 +                                                  "hmac-sha224-"
14361 +                                                  "cbc-des3_ede-caam-qi",
14362 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14363 +                       },
14364 +                       .setkey = aead_setkey,
14365 +                       .setauthsize = aead_setauthsize,
14366 +                       .encrypt = aead_encrypt,
14367 +                       .decrypt = aead_decrypt,
14368 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14369 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14370 +               },
14371 +               .caam = {
14372 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14373 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14374 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14375 +                       .geniv = true,
14376 +               }
14377 +       },
14378 +       {
14379 +               .aead = {
14380 +                       .base = {
14381 +                               .cra_name = "authenc(hmac(sha256),"
14382 +                                           "cbc(des3_ede))",
14383 +                               .cra_driver_name = "authenc-hmac-sha256-"
14384 +                                                  "cbc-des3_ede-caam-qi",
14385 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14386 +                       },
14387 +                       .setkey = aead_setkey,
14388 +                       .setauthsize = aead_setauthsize,
14389 +                       .encrypt = aead_encrypt,
14390 +                       .decrypt = aead_decrypt,
14391 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14392 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14393 +               },
14394 +               .caam = {
14395 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14396 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14397 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14398 +               },
14399 +       },
14400 +       {
14401 +               .aead = {
14402 +                       .base = {
14403 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14404 +                                           "cbc(des3_ede)))",
14405 +                               .cra_driver_name = "echainiv-authenc-"
14406 +                                                  "hmac-sha256-"
14407 +                                                  "cbc-des3_ede-caam-qi",
14408 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14409 +                       },
14410 +                       .setkey = aead_setkey,
14411 +                       .setauthsize = aead_setauthsize,
14412 +                       .encrypt = aead_encrypt,
14413 +                       .decrypt = aead_decrypt,
14414 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14415 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14416 +               },
14417 +               .caam = {
14418 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14419 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14420 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14421 +                       .geniv = true,
14422 +               }
14423 +       },
14424 +       {
14425 +               .aead = {
14426 +                       .base = {
14427 +                               .cra_name = "authenc(hmac(sha384),"
14428 +                                           "cbc(des3_ede))",
14429 +                               .cra_driver_name = "authenc-hmac-sha384-"
14430 +                                                  "cbc-des3_ede-caam-qi",
14431 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14432 +                       },
14433 +                       .setkey = aead_setkey,
14434 +                       .setauthsize = aead_setauthsize,
14435 +                       .encrypt = aead_encrypt,
14436 +                       .decrypt = aead_decrypt,
14437 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14438 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14439 +               },
14440 +               .caam = {
14441 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14442 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14443 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14444 +               },
14445 +       },
14446 +       {
14447 +               .aead = {
14448 +                       .base = {
14449 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14450 +                                           "cbc(des3_ede)))",
14451 +                               .cra_driver_name = "echainiv-authenc-"
14452 +                                                  "hmac-sha384-"
14453 +                                                  "cbc-des3_ede-caam-qi",
14454 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14455 +                       },
14456 +                       .setkey = aead_setkey,
14457 +                       .setauthsize = aead_setauthsize,
14458 +                       .encrypt = aead_encrypt,
14459 +                       .decrypt = aead_decrypt,
14460 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14461 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14462 +               },
14463 +               .caam = {
14464 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14465 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14466 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14467 +                       .geniv = true,
14468 +               }
14469 +       },
14470 +       {
14471 +               .aead = {
14472 +                       .base = {
14473 +                               .cra_name = "authenc(hmac(sha512),"
14474 +                                           "cbc(des3_ede))",
14475 +                               .cra_driver_name = "authenc-hmac-sha512-"
14476 +                                                  "cbc-des3_ede-caam-qi",
14477 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14478 +                       },
14479 +                       .setkey = aead_setkey,
14480 +                       .setauthsize = aead_setauthsize,
14481 +                       .encrypt = aead_encrypt,
14482 +                       .decrypt = aead_decrypt,
14483 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14484 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14485 +               },
14486 +               .caam = {
14487 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14488 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14489 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14490 +               },
14491 +       },
14492 +       {
14493 +               .aead = {
14494 +                       .base = {
14495 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14496 +                                           "cbc(des3_ede)))",
14497 +                               .cra_driver_name = "echainiv-authenc-"
14498 +                                                  "hmac-sha512-"
14499 +                                                  "cbc-des3_ede-caam-qi",
14500 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14501 +                       },
14502 +                       .setkey = aead_setkey,
14503 +                       .setauthsize = aead_setauthsize,
14504 +                       .encrypt = aead_encrypt,
14505 +                       .decrypt = aead_decrypt,
14506 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14507 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14508 +               },
14509 +               .caam = {
14510 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14511 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14512 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14513 +                       .geniv = true,
14514 +               }
14515 +       },
14516 +       {
14517 +               .aead = {
14518 +                       .base = {
14519 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
14520 +                               .cra_driver_name = "authenc-hmac-md5-"
14521 +                                                  "cbc-des-caam-qi",
14522 +                               .cra_blocksize = DES_BLOCK_SIZE,
14523 +                       },
14524 +                       .setkey = aead_setkey,
14525 +                       .setauthsize = aead_setauthsize,
14526 +                       .encrypt = aead_encrypt,
14527 +                       .decrypt = aead_decrypt,
14528 +                       .ivsize = DES_BLOCK_SIZE,
14529 +                       .maxauthsize = MD5_DIGEST_SIZE,
14530 +               },
14531 +               .caam = {
14532 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14533 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14534 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14535 +               },
14536 +       },
14537 +       {
14538 +               .aead = {
14539 +                       .base = {
14540 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14541 +                                           "cbc(des)))",
14542 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14543 +                                                  "cbc-des-caam-qi",
14544 +                               .cra_blocksize = DES_BLOCK_SIZE,
14545 +                       },
14546 +                       .setkey = aead_setkey,
14547 +                       .setauthsize = aead_setauthsize,
14548 +                       .encrypt = aead_encrypt,
14549 +                       .decrypt = aead_decrypt,
14550 +                       .ivsize = DES_BLOCK_SIZE,
14551 +                       .maxauthsize = MD5_DIGEST_SIZE,
14552 +               },
14553 +               .caam = {
14554 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14555 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14556 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14557 +                       .geniv = true,
14558 +               }
14559 +       },
14560 +       {
14561 +               .aead = {
14562 +                       .base = {
14563 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
14564 +                               .cra_driver_name = "authenc-hmac-sha1-"
14565 +                                                  "cbc-des-caam-qi",
14566 +                               .cra_blocksize = DES_BLOCK_SIZE,
14567 +                       },
14568 +                       .setkey = aead_setkey,
14569 +                       .setauthsize = aead_setauthsize,
14570 +                       .encrypt = aead_encrypt,
14571 +                       .decrypt = aead_decrypt,
14572 +                       .ivsize = DES_BLOCK_SIZE,
14573 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14574 +               },
14575 +               .caam = {
14576 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14577 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14578 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14579 +               },
14580 +       },
14581 +       {
14582 +               .aead = {
14583 +                       .base = {
14584 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14585 +                                           "cbc(des)))",
14586 +                               .cra_driver_name = "echainiv-authenc-"
14587 +                                                  "hmac-sha1-cbc-des-caam-qi",
14588 +                               .cra_blocksize = DES_BLOCK_SIZE,
14589 +                       },
14590 +                       .setkey = aead_setkey,
14591 +                       .setauthsize = aead_setauthsize,
14592 +                       .encrypt = aead_encrypt,
14593 +                       .decrypt = aead_decrypt,
14594 +                       .ivsize = DES_BLOCK_SIZE,
14595 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14596 +               },
14597 +               .caam = {
14598 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14599 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14600 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14601 +                       .geniv = true,
14602 +               }
14603 +       },
14604 +       {
14605 +               .aead = {
14606 +                       .base = {
14607 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
14608 +                               .cra_driver_name = "authenc-hmac-sha224-"
14609 +                                                  "cbc-des-caam-qi",
14610 +                               .cra_blocksize = DES_BLOCK_SIZE,
14611 +                       },
14612 +                       .setkey = aead_setkey,
14613 +                       .setauthsize = aead_setauthsize,
14614 +                       .encrypt = aead_encrypt,
14615 +                       .decrypt = aead_decrypt,
14616 +                       .ivsize = DES_BLOCK_SIZE,
14617 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14618 +               },
14619 +               .caam = {
14620 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14621 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14622 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14623 +               },
14624 +       },
14625 +       {
14626 +               .aead = {
14627 +                       .base = {
14628 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14629 +                                           "cbc(des)))",
14630 +                               .cra_driver_name = "echainiv-authenc-"
14631 +                                                  "hmac-sha224-cbc-des-"
14632 +                                                  "caam-qi",
14633 +                               .cra_blocksize = DES_BLOCK_SIZE,
14634 +                       },
14635 +                       .setkey = aead_setkey,
14636 +                       .setauthsize = aead_setauthsize,
14637 +                       .encrypt = aead_encrypt,
14638 +                       .decrypt = aead_decrypt,
14639 +                       .ivsize = DES_BLOCK_SIZE,
14640 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14641 +               },
14642 +               .caam = {
14643 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14644 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14645 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14646 +                       .geniv = true,
14647 +               }
14648 +       },
14649 +       {
14650 +               .aead = {
14651 +                       .base = {
14652 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
14653 +                               .cra_driver_name = "authenc-hmac-sha256-"
14654 +                                                  "cbc-des-caam-qi",
14655 +                               .cra_blocksize = DES_BLOCK_SIZE,
14656 +                       },
14657 +                       .setkey = aead_setkey,
14658 +                       .setauthsize = aead_setauthsize,
14659 +                       .encrypt = aead_encrypt,
14660 +                       .decrypt = aead_decrypt,
14661 +                       .ivsize = DES_BLOCK_SIZE,
14662 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14663 +               },
14664 +               .caam = {
14665 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14666 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14667 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14668 +               },
14669 +       },
14670 +       {
14671 +               .aead = {
14672 +                       .base = {
14673 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14674 +                                           "cbc(des)))",
14675 +                               .cra_driver_name = "echainiv-authenc-"
14676 +                                                  "hmac-sha256-cbc-des-"
14677 +                                                  "caam-qi",
14678 +                               .cra_blocksize = DES_BLOCK_SIZE,
14679 +                       },
14680 +                       .setkey = aead_setkey,
14681 +                       .setauthsize = aead_setauthsize,
14682 +                       .encrypt = aead_encrypt,
14683 +                       .decrypt = aead_decrypt,
14684 +                       .ivsize = DES_BLOCK_SIZE,
14685 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14686 +               },
14687 +               .caam = {
14688 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14689 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14690 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14691 +                       .geniv = true,
14692 +               },
14693 +       },
14694 +       {
14695 +               .aead = {
14696 +                       .base = {
14697 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
14698 +                               .cra_driver_name = "authenc-hmac-sha384-"
14699 +                                                  "cbc-des-caam-qi",
14700 +                               .cra_blocksize = DES_BLOCK_SIZE,
14701 +                       },
14702 +                       .setkey = aead_setkey,
14703 +                       .setauthsize = aead_setauthsize,
14704 +                       .encrypt = aead_encrypt,
14705 +                       .decrypt = aead_decrypt,
14706 +                       .ivsize = DES_BLOCK_SIZE,
14707 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14708 +               },
14709 +               .caam = {
14710 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14711 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14712 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14713 +               },
14714 +       },
14715 +       {
14716 +               .aead = {
14717 +                       .base = {
14718 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14719 +                                           "cbc(des)))",
14720 +                               .cra_driver_name = "echainiv-authenc-"
14721 +                                                  "hmac-sha384-cbc-des-"
14722 +                                                  "caam-qi",
14723 +                               .cra_blocksize = DES_BLOCK_SIZE,
14724 +                       },
14725 +                       .setkey = aead_setkey,
14726 +                       .setauthsize = aead_setauthsize,
14727 +                       .encrypt = aead_encrypt,
14728 +                       .decrypt = aead_decrypt,
14729 +                       .ivsize = DES_BLOCK_SIZE,
14730 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14731 +               },
14732 +               .caam = {
14733 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14734 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14735 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14736 +                       .geniv = true,
14737 +               }
14738 +       },
14739 +       {
14740 +               .aead = {
14741 +                       .base = {
14742 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
14743 +                               .cra_driver_name = "authenc-hmac-sha512-"
14744 +                                                  "cbc-des-caam-qi",
14745 +                               .cra_blocksize = DES_BLOCK_SIZE,
14746 +                       },
14747 +                       .setkey = aead_setkey,
14748 +                       .setauthsize = aead_setauthsize,
14749 +                       .encrypt = aead_encrypt,
14750 +                       .decrypt = aead_decrypt,
14751 +                       .ivsize = DES_BLOCK_SIZE,
14752 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14753 +               },
14754 +               .caam = {
14755 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14756 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14757 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14758 +               }
14759 +       },
14760 +       {
14761 +               .aead = {
14762 +                       .base = {
14763 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14764 +                                           "cbc(des)))",
14765 +                               .cra_driver_name = "echainiv-authenc-"
14766 +                                                  "hmac-sha512-cbc-des-"
14767 +                                                  "caam-qi",
14768 +                               .cra_blocksize = DES_BLOCK_SIZE,
14769 +                       },
14770 +                       .setkey = aead_setkey,
14771 +                       .setauthsize = aead_setauthsize,
14772 +                       .encrypt = aead_encrypt,
14773 +                       .decrypt = aead_decrypt,
14774 +                       .ivsize = DES_BLOCK_SIZE,
14775 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14776 +               },
14777 +               .caam = {
14778 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14779 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14780 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14781 +                       .geniv = true,
14782 +               }
14783 +       },
14784 +       {
14785 +               .aead = {
14786 +                       .base = {
14787 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
14788 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14789 +                               .cra_blocksize = AES_BLOCK_SIZE,
14790 +                       },
14791 +                       .setkey = tls_setkey,
14792 +                       .setauthsize = tls_setauthsize,
14793 +                       .encrypt = tls_encrypt,
14794 +                       .decrypt = tls_decrypt,
14795 +                       .ivsize = AES_BLOCK_SIZE,
14796 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14797 +               },
14798 +               .caam = {
14799 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14800 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14801 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14802 +               }
14803 +       }
14804 +};
14805 +
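+/*
+ * The driver_aeads[] template array above is walked at module init time:
+ * caam_qi_algapi_init() below skips any entry whose cipher or digest block
+ * is absent from the detected SEC hardware and registers the rest through
+ * crypto_register_aead().
+ */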
14806 +struct caam_crypto_alg {
14807 +       struct list_head entry;
14808 +       struct crypto_alg crypto_alg;
14809 +       struct caam_alg_entry caam;
14810 +};
14811 +
14812 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
14813 +{
14814 +       struct caam_drv_private *priv;
14815 +       /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
14816 +       static const u8 digest_size[] = {
14817 +               MD5_DIGEST_SIZE,
14818 +               SHA1_DIGEST_SIZE,
14819 +               SHA224_DIGEST_SIZE,
14820 +               SHA256_DIGEST_SIZE,
14821 +               SHA384_DIGEST_SIZE,
14822 +               SHA512_DIGEST_SIZE
14823 +       };
14824 +       u8 op_id;
14825 +
14826 +       /*
14827 +        * distribute tfms across job rings to ensure in-order
14828 +        * crypto request processing per tfm
14829 +        */
14830 +       ctx->jrdev = caam_jr_alloc();
14831 +       if (IS_ERR(ctx->jrdev)) {
14832 +               pr_err("Job Ring Device allocation for transform failed\n");
14833 +               return PTR_ERR(ctx->jrdev);
14834 +       }
14835 +
14836 +       ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
14837 +                                     DMA_TO_DEVICE);
14838 +       if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
14839 +               dev_err(ctx->jrdev, "unable to map key\n");
14840 +               caam_jr_free(ctx->jrdev);
14841 +               return -ENOMEM;
14842 +       }
14843 +
14844 +       /* copy descriptor header template value */
14845 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
14846 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
14847 +
14848 +       if (ctx->adata.algtype) {
14849 +               op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
14850 +                               >> OP_ALG_ALGSEL_SHIFT;
14851 +               if (op_id < ARRAY_SIZE(digest_size)) {
14852 +                       ctx->authsize = digest_size[op_id];
14853 +               } else {
14854 +                       dev_err(ctx->jrdev,
14855 +                               "incorrect op_id %d; must be less than %zu\n",
14856 +                               op_id, ARRAY_SIZE(digest_size));
14857 +                       caam_jr_free(ctx->jrdev);
14858 +                       return -EINVAL;
14859 +               }
14860 +       } else {
14861 +               ctx->authsize = 0;
14862 +       }
14863 +
14864 +       priv = dev_get_drvdata(ctx->jrdev->parent);
14865 +       ctx->qidev = priv->qidev;
14866 +
14867 +       spin_lock_init(&ctx->lock);
14868 +       ctx->drv_ctx[ENCRYPT] = NULL;
14869 +       ctx->drv_ctx[DECRYPT] = NULL;
14870 +       ctx->drv_ctx[GIVENCRYPT] = NULL;
14871 +
14872 +       return 0;
14873 +}
14874 +
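+/*
+ * Thin init wrappers: recover the enclosing caam_{crypto,aead}_alg from the
+ * generic transform via container_of() and hand its CAAM parameters to
+ * caam_init_common().
+ */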
14875 +static int caam_cra_init(struct crypto_tfm *tfm)
14876 +{
14877 +       struct crypto_alg *alg = tfm->__crt_alg;
14878 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14879 +                                                       crypto_alg);
14880 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
14881 +
14882 +       return caam_init_common(ctx, &caam_alg->caam);
14883 +}
14884 +
14885 +static int caam_aead_init(struct crypto_aead *tfm)
14886 +{
14887 +       struct aead_alg *alg = crypto_aead_alg(tfm);
14888 +       struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14889 +                                                     aead);
14890 +       struct caam_ctx *ctx = crypto_aead_ctx(tfm);
14891 +
14892 +       return caam_init_common(ctx, &caam_alg->caam);
14893 +}
14894 +
14895 +static void caam_exit_common(struct caam_ctx *ctx)
14896 +{
14897 +       caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
14898 +       caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
14899 +       caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
14900 +
14901 +       dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
14902 +                        DMA_TO_DEVICE);
14903 +
14904 +       caam_jr_free(ctx->jrdev);
14905 +}
14906 +
14907 +static void caam_cra_exit(struct crypto_tfm *tfm)
14908 +{
14909 +       caam_exit_common(crypto_tfm_ctx(tfm));
14910 +}
14911 +
14912 +static void caam_aead_exit(struct crypto_aead *tfm)
14913 +{
14914 +       caam_exit_common(crypto_aead_ctx(tfm));
14915 +}
14916 +
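+/*
+ * alg_list tracks the dynamically allocated (giv/abl)kcipher algs so the
+ * exit path below can unregister and free them; AEAD algs live in the
+ * static driver_aeads[] array and carry a 'registered' flag instead.
+ */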
14917 +static struct list_head alg_list;
14918 +static void __exit caam_qi_algapi_exit(void)
14919 +{
14920 +       struct caam_crypto_alg *t_alg, *n;
14921 +       int i;
14922 +
14923 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
14924 +               struct caam_aead_alg *t_alg = driver_aeads + i;
14925 +
14926 +               if (t_alg->registered)
14927 +                       crypto_unregister_aead(&t_alg->aead);
14928 +       }
14929 +
14930 +       if (!alg_list.next)
14931 +               return;
14932 +
14933 +       list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
14934 +               crypto_unregister_alg(&t_alg->crypto_alg);
14935 +               list_del(&t_alg->entry);
14936 +               kfree(t_alg);
14937 +       }
14938 +}
14939 +
14940 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
14941 +                                             *template)
14942 +{
14943 +       struct caam_crypto_alg *t_alg;
14944 +       struct crypto_alg *alg;
14945 +
14946 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
14947 +       if (!t_alg)
14948 +               return ERR_PTR(-ENOMEM);
14949 +
14950 +       alg = &t_alg->crypto_alg;
14951 +
14952 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
14953 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
14954 +                template->driver_name);
14955 +       alg->cra_module = THIS_MODULE;
14956 +       alg->cra_init = caam_cra_init;
14957 +       alg->cra_exit = caam_cra_exit;
14958 +       alg->cra_priority = CAAM_CRA_PRIORITY;
14959 +       alg->cra_blocksize = template->blocksize;
14960 +       alg->cra_alignmask = 0;
14961 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
14962 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
14963 +                        template->type;
14964 +       switch (template->type) {
14965 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
14966 +               alg->cra_type = &crypto_givcipher_type;
14967 +               alg->cra_ablkcipher = template->template_ablkcipher;
14968 +               break;
14969 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
14970 +               alg->cra_type = &crypto_ablkcipher_type;
14971 +               alg->cra_ablkcipher = template->template_ablkcipher;
14972 +               break;
14973 +       }
14974 +
14975 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
14976 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
14977 +
14978 +       return t_alg;
14979 +}
14980 +
14981 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
14982 +{
14983 +       struct aead_alg *alg = &t_alg->aead;
14984 +
14985 +       alg->base.cra_module = THIS_MODULE;
14986 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
14987 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
14988 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
14989 +
14990 +       alg->init = caam_aead_init;
14991 +       alg->exit = caam_aead_exit;
14992 +}
14993 +
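+/*
+ * Module entry point: locate the SEC controller node, make sure the QI
+ * backend came up (priv->qi_present) and we are not on DPAA 2.x, then read
+ * the CHA instantiation registers and register only the algorithms the
+ * detected DES/AES/MD blocks can actually back.
+ */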
14994 +static int __init caam_qi_algapi_init(void)
14995 +{
14996 +       struct device_node *dev_node;
14997 +       struct platform_device *pdev;
14998 +       struct device *ctrldev;
14999 +       struct caam_drv_private *priv;
15000 +       int i = 0, err = 0;
15001 +       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15002 +       unsigned int md_limit = SHA512_DIGEST_SIZE;
15003 +       bool registered = false;
15004 +
15005 +       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15006 +       if (!dev_node) {
15007 +               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15008 +               if (!dev_node)
15009 +                       return -ENODEV;
15010 +       }
15011 +
15012 +       pdev = of_find_device_by_node(dev_node);
15013 +       of_node_put(dev_node);
15014 +       if (!pdev)
15015 +               return -ENODEV;
15016 +
15017 +       ctrldev = &pdev->dev;
15018 +       priv = dev_get_drvdata(ctrldev);
15019 +
15020 +       /*
15021 +        * If priv is NULL, it's probably because the caam driver wasn't
15022 +        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15023 +        */
15024 +       if (!priv || !priv->qi_present)
15025 +               return -ENODEV;
15026 +
15027 +       if (caam_dpaa2) {
15028 +               dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15029 +               return -ENODEV;
15030 +       }
15031 +
15032 +       INIT_LIST_HEAD(&alg_list);
15033 +
15034 +       /*
15035 +        * Register crypto algorithms the device supports.
15036 +        * First, detect presence and attributes of DES, AES, and MD blocks.
15037 +        */
15038 +       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15039 +       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15040 +       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15041 +       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15042 +       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15043 +
15044 +       /* If MD is present, limit digest size based on LP256 */
15045 +       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15046 +               md_limit = SHA256_DIGEST_SIZE;
15047 +
15048 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15049 +               struct caam_crypto_alg *t_alg;
15050 +               struct caam_alg_template *alg = driver_algs + i;
15051 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15052 +
15053 +               /* Skip DES algorithms if not supported by device */
15054 +               if (!des_inst &&
15055 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15056 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
15057 +                       continue;
15058 +
15059 +               /* Skip AES algorithms if not supported by device */
15060 +               if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15061 +                       continue;
15062 +
15063 +               t_alg = caam_alg_alloc(alg);
15064 +               if (IS_ERR(t_alg)) {
15065 +                       err = PTR_ERR(t_alg);
15066 +                       dev_warn(priv->qidev, "%s alg allocation failed\n",
15067 +                                alg->driver_name);
15068 +                       continue;
15069 +               }
15070 +
15071 +               err = crypto_register_alg(&t_alg->crypto_alg);
15072 +               if (err) {
15073 +                       dev_warn(priv->qidev, "%s alg registration failed\n",
15074 +                                t_alg->crypto_alg.cra_driver_name);
15075 +                       kfree(t_alg);
15076 +                       continue;
15077 +               }
15078 +
15079 +               list_add_tail(&t_alg->entry, &alg_list);
15080 +               registered = true;
15081 +       }
15082 +
15083 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15084 +               struct caam_aead_alg *t_alg = driver_aeads + i;
15085 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15086 +                                OP_ALG_ALGSEL_MASK;
15087 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15088 +                                OP_ALG_ALGSEL_MASK;
15089 +               u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15090 +
15091 +               /* Skip DES algorithms if not supported by device */
15092 +               if (!des_inst &&
15093 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15094 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15095 +                       continue;
15096 +
15097 +               /* Skip AES algorithms if not supported by device */
15098 +               if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15099 +                       continue;
15100 +
15101 +               /*
15102 +                * Check support for AES algorithms not available
15103 +                * on LP devices.
15104 +                */
15105 +               if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15106 +                   (alg_aai == OP_ALG_AAI_GCM))
15107 +                       continue;
15108 +
15109 +               /*
15110 +                * Skip algorithms requiring message digests
15111 +                * if MD or MD size is not supported by device.
15112 +                */
15113 +               if (c2_alg_sel &&
15114 +                   (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15115 +                       continue;
15116 +
15117 +               caam_aead_alg_init(t_alg);
15118 +
15119 +               err = crypto_register_aead(&t_alg->aead);
15120 +               if (err) {
15121 +                       pr_warn("%s alg registration failed\n",
15122 +                               t_alg->aead.base.cra_driver_name);
15123 +                       continue;
15124 +               }
15125 +
15126 +               t_alg->registered = true;
15127 +               registered = true;
15128 +       }
15129 +
15130 +       if (registered)
15131 +               dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15132 +
15133 +       return err;
15134 +}
15135 +
15136 +module_init(caam_qi_algapi_init);
15137 +module_exit(caam_qi_algapi_exit);
15138 +
15139 +MODULE_LICENSE("GPL");
15140 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15141 +MODULE_AUTHOR("Freescale Semiconductor");
15142 --- /dev/null
15143 +++ b/drivers/crypto/caam/caamalg_qi2.c
15144 @@ -0,0 +1,4428 @@
15145 +/*
15146 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15147 + * Copyright 2017 NXP
15148 + *
15149 + * Redistribution and use in source and binary forms, with or without
15150 + * modification, are permitted provided that the following conditions are met:
15151 + *     * Redistributions of source code must retain the above copyright
15152 + *      notice, this list of conditions and the following disclaimer.
15153 + *     * Redistributions in binary form must reproduce the above copyright
15154 + *      notice, this list of conditions and the following disclaimer in the
15155 + *      documentation and/or other materials provided with the distribution.
15156 + *     * Neither the names of the above-listed copyright holders nor the
15157 + *      names of any contributors may be used to endorse or promote products
15158 + *      derived from this software without specific prior written permission.
15159 + *
15160 + *
15161 + * ALTERNATIVELY, this software may be distributed under the terms of the
15162 + * GNU General Public License ("GPL") as published by the Free Software
15163 + * Foundation, either version 2 of that License or (at your option) any
15164 + * later version.
15165 + *
15166 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15167 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15168 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15169 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15170 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15171 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15172 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15173 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15174 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15175 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15176 + * POSSIBILITY OF SUCH DAMAGE.
15177 + */
15178 +
15179 +#include "compat.h"
15180 +#include "regs.h"
15181 +#include "caamalg_qi2.h"
15182 +#include "dpseci_cmd.h"
15183 +#include "desc_constr.h"
15184 +#include "error.h"
15185 +#include "sg_sw_sec4.h"
15186 +#include "sg_sw_qm2.h"
15187 +#include "key_gen.h"
15188 +#include "caamalg_desc.h"
15189 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15190 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15191 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15192 +
15193 +#define CAAM_CRA_PRIORITY      2000
15194 +
15195 +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
15196 +#define CAAM_MAX_KEY_SIZE      (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15197 +                                SHA512_DIGEST_SIZE * 2)
15198 +
15199 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15200 +bool caam_little_end;
15201 +EXPORT_SYMBOL(caam_little_end);
15202 +bool caam_imx;
15203 +EXPORT_SYMBOL(caam_imx);
15204 +#endif
15205 +
15206 +/*
15207 + * This is a cache of buffers, from which users of the CAAM QI driver
15208 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15209 + * NOTE: A more elegant solution would be to have some headroom in the frames
15210 + *       being processed. This can be added by the dpaa2-eth driver. This would
15211 + *       pose a problem for userspace application processing, which cannot
15212 + *       know of this limitation. So for now, this will work.
15213 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
15214 + */
15215 +static struct kmem_cache *qi_cache;
15216 +
15217 +struct caam_alg_entry {
15218 +       struct device *dev;
15219 +       int class1_alg_type;
15220 +       int class2_alg_type;
15221 +       bool rfc3686;
15222 +       bool geniv;
15223 +};
15224 +
15225 +struct caam_aead_alg {
15226 +       struct aead_alg aead;
15227 +       struct caam_alg_entry caam;
15228 +       bool registered;
15229 +};
15230 +
15231 +/**
15232 + * caam_ctx - per-session context
15233 + * @flc: Flow Contexts array
15234 + * @key:  virtual address of the key(s): [authentication key], encryption key
15235 + * @key_dma: I/O virtual address of the key
15236 + * @dev: dpseci device
15237 + * @adata: authentication algorithm details
15238 + * @cdata: encryption algorithm details
15239 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15240 + */
15241 +struct caam_ctx {
15242 +       struct caam_flc flc[NUM_OP];
15243 +       u8 key[CAAM_MAX_KEY_SIZE];
15244 +       dma_addr_t key_dma;
15245 +       struct device *dev;
15246 +       struct alginfo adata;
15247 +       struct alginfo cdata;
15248 +       unsigned int authsize;
15249 +};
15250 +
15251 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15252 +                             dma_addr_t iova_addr)
15253 +{
15254 +       phys_addr_t phys_addr;
15255 +
15256 +       phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15257 +                                  iova_addr;
15258 +
15259 +       return phys_to_virt(phys_addr);
15260 +}
15261 +
15262 +/*
15263 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15264 + *
15265 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15266 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15267 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15268 + * hosting 16 SG entries.
15269 + *
15270 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15271 + *
15272 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15273 + */
15274 +static inline void *qi_cache_zalloc(gfp_t flags)
15275 +{
15276 +       return kmem_cache_zalloc(qi_cache, flags);
15277 +}
15278 +
15279 +/*
15280 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15281 + *
15282 + * @obj - buffer previously allocated by qi_cache_zalloc
15283 + *
15284 + * No checking is done; the call is a passthrough to
15285 + * kmem_cache_free(...)
15286 + */
15287 +static inline void qi_cache_free(void *obj)
15288 +{
15289 +       kmem_cache_free(qi_cache, obj);
15290 +}
15291 +
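+/*
+ * Recover the caam_request stashed in the crypto request context by the
+ * (giv)ablkcipher and AEAD entry points.
+ */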
15292 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15293 +{
15294 +       switch (crypto_tfm_alg_type(areq->tfm)) {
15295 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
15296 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
15297 +               return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15298 +       case CRYPTO_ALG_TYPE_AEAD:
15299 +               return aead_request_ctx(container_of(areq, struct aead_request,
15300 +                                                    base));
15301 +       default:
15302 +               return ERR_PTR(-EINVAL);
15303 +       }
15304 +}
15305 +
15306 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15307 +                      struct scatterlist *dst, int src_nents,
15308 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
15309 +                      enum optype op_type, dma_addr_t qm_sg_dma,
15310 +                      int qm_sg_bytes)
15311 +{
15312 +       if (dst != src) {
15313 +               if (src_nents)
15314 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15315 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15316 +       } else {
15317 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15318 +       }
15319 +
15320 +       if (iv_dma)
15321 +               dma_unmap_single(dev, iv_dma, ivsize,
15322 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15323 +                                                        DMA_TO_DEVICE);
15324 +
15325 +       if (qm_sg_bytes)
15326 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15327 +}
15328 +
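+/*
+ * Build the ENCRYPT/DECRYPT flow contexts (shared descriptors) for an
+ * authenc-style AEAD. desc_inline_query() reports through inl_mask which
+ * keys fit inline in the descriptor: bit 0 covers the split authentication
+ * key (data_len[0]), bit 1 the encryption key (data_len[1]); keys that do
+ * not fit are referenced by DMA address instead.
+ */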
15329 +static int aead_set_sh_desc(struct crypto_aead *aead)
15330 +{
15331 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15332 +                                                typeof(*alg), aead);
15333 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15334 +       unsigned int ivsize = crypto_aead_ivsize(aead);
15335 +       struct device *dev = ctx->dev;
15336 +       struct caam_flc *flc;
15337 +       u32 *desc;
15338 +       u32 ctx1_iv_off = 0;
15339 +       u32 *nonce = NULL;
15340 +       unsigned int data_len[2];
15341 +       u32 inl_mask;
15342 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15343 +                              OP_ALG_AAI_CTR_MOD128);
15344 +       const bool is_rfc3686 = alg->caam.rfc3686;
15345 +
15346 +       if (!ctx->cdata.keylen || !ctx->authsize)
15347 +               return 0;
15348 +
15349 +       /*
15350 +        * AES-CTR needs to load IV in CONTEXT1 reg
15351 +        * at an offset of 128 bits (16 bytes)
15352 +        * CONTEXT1[255:128] = IV
15353 +        */
15354 +       if (ctr_mode)
15355 +               ctx1_iv_off = 16;
15356 +
15357 +       /*
15358 +        * RFC3686 specific:
15359 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15360 +        */
15361 +       if (is_rfc3686) {
15362 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15363 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15364 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15365 +       }
15366 +
15367 +       data_len[0] = ctx->adata.keylen_pad;
15368 +       data_len[1] = ctx->cdata.keylen;
15369 +
15370 +       /* aead_encrypt shared descriptor */
15371 +       if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15372 +                                                DESC_QI_AEAD_ENC_LEN) +
15373 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15374 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15375 +                             ARRAY_SIZE(data_len)) < 0)
15376 +               return -EINVAL;
15377 +
15378 +       if (inl_mask & 1)
15379 +               ctx->adata.key_virt = ctx->key;
15380 +       else
15381 +               ctx->adata.key_dma = ctx->key_dma;
15382 +
15383 +       if (inl_mask & 2)
15384 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15385 +       else
15386 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15387 +
15388 +       ctx->adata.key_inline = !!(inl_mask & 1);
15389 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15390 +
15391 +       flc = &ctx->flc[ENCRYPT];
15392 +       desc = flc->sh_desc;
15393 +
15394 +       if (alg->caam.geniv)
15395 +               cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15396 +                                         ivsize, ctx->authsize, is_rfc3686,
15397 +                                         nonce, ctx1_iv_off, true);
15398 +       else
15399 +               cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15400 +                                      ivsize, ctx->authsize, is_rfc3686, nonce,
15401 +                                      ctx1_iv_off, true);
15402 +
15403 +       flc->flc[1] = desc_len(desc); /* SDL */
15404 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15405 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15406 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15407 +               dev_err(dev, "unable to map shared descriptor\n");
15408 +               return -ENOMEM;
15409 +       }
15410 +
15411 +       /* aead_decrypt shared descriptor */
15412 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15413 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15414 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15415 +                             ARRAY_SIZE(data_len)) < 0)
15416 +               return -EINVAL;
15417 +
15418 +       if (inl_mask & 1)
15419 +               ctx->adata.key_virt = ctx->key;
15420 +       else
15421 +               ctx->adata.key_dma = ctx->key_dma;
15422 +
15423 +       if (inl_mask & 2)
15424 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15425 +       else
15426 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15427 +
15428 +       ctx->adata.key_inline = !!(inl_mask & 1);
15429 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15430 +
15431 +       flc = &ctx->flc[DECRYPT];
15432 +       desc = flc->sh_desc;
15433 +
15434 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15435 +                              ivsize, ctx->authsize, alg->caam.geniv,
15436 +                              is_rfc3686, nonce, ctx1_iv_off, true);
15437 +
15438 +       flc->flc[1] = desc_len(desc); /* SDL */
15439 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15440 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15441 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15442 +               dev_err(dev, "unable to map shared descriptor\n");
15443 +               return -ENOMEM;
15444 +       }
15445 +
15446 +       return 0;
15447 +}
15448 +
15449 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15450 +{
15451 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15452 +
15453 +       ctx->authsize = authsize;
15454 +       aead_set_sh_desc(authenc);
15455 +
15456 +       return 0;
15457 +}
15458 +
15459 +struct split_key_sh_result {
15460 +       struct completion completion;
15461 +       int err;
15462 +       struct device *dev;
15463 +};
15464 +
15465 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15466 +{
15467 +       struct split_key_sh_result *res = cbk_ctx;
15468 +
15469 +#ifdef DEBUG
15470 +       dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15471 +#endif
15472 +
15473 +       if (err)
15474 +               caam_qi2_strstatus(res->dev, err);
15475 +
15476 +       res->err = err;
15477 +       complete(&res->completion);
15478 +}
15479 +
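+/*
+ * Derive the HMAC split key (ipad/opad-expanded key) on the SEC engine:
+ * load the raw key into MDHA, trigger pad expansion with a zero-length
+ * FIFO LOAD, then FIFO STORE the result to key_out. The job is enqueued
+ * via dpaa2_caam_enqueue() and waited for synchronously on a completion.
+ */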
15480 +static int gen_split_key_sh(struct device *dev, u8 *key_out,
15481 +                           struct alginfo * const adata, const u8 *key_in,
15482 +                           u32 keylen)
15483 +{
15484 +       struct caam_request *req_ctx;
15485 +       u32 *desc;
15486 +       struct split_key_sh_result result;
15487 +       dma_addr_t dma_addr_in, dma_addr_out;
15488 +       struct caam_flc *flc;
15489 +       struct dpaa2_fl_entry *in_fle, *out_fle;
15490 +       int ret = -ENOMEM;
15491 +
15492 +       req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
15493 +       if (!req_ctx)
15494 +               return -ENOMEM;
15495 +
15496 +       in_fle = &req_ctx->fd_flt[1];
15497 +       out_fle = &req_ctx->fd_flt[0];
15498 +
15499 +       flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
15500 +       if (!flc)
15501 +               goto err_flc;
15502 +
15503 +       dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
15504 +                                    DMA_TO_DEVICE);
15505 +       if (dma_mapping_error(dev, dma_addr_in)) {
15506 +               dev_err(dev, "unable to map key input memory\n");
15507 +               goto err_dma_addr_in;
15508 +       }
15509 +
15510 +       dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
15511 +                                     DMA_FROM_DEVICE);
15512 +       if (dma_mapping_error(dev, dma_addr_out)) {
15513 +               dev_err(dev, "unable to map key output memory\n");
15514 +               goto err_dma_addr_out;
15515 +       }
15516 +
15517 +       desc = flc->sh_desc;
15518 +
15519 +       init_sh_desc(desc, 0);
15520 +       append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
15521 +
15522 +       /* Set MDHA up for an HMAC-INIT operation */
15523 +       append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
15524 +                        OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
15525 +                        OP_ALG_AS_INIT);
15526 +
15527 +       /*
15528 +        * do a FIFO_LOAD of zero; this will trigger the internal key expansion
15529 +        * into both pads inside MDHA
15530 +        */
15531 +       append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
15532 +                               FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
15533 +
15534 +       /*
15535 +        * FIFO_STORE with the explicit split-key content store
15536 +        * (0x26 output type)
15537 +        */
15538 +       append_fifo_store(desc, dma_addr_out, adata->keylen,
15539 +                         LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
15540 +
15541 +       flc->flc[1] = desc_len(desc); /* SDL */
15542 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15543 +                                     desc_bytes(desc), DMA_TO_DEVICE);
15544 +       if (dma_mapping_error(dev, flc->flc_dma)) {
15545 +               dev_err(dev, "unable to map shared descriptor\n");
15546 +               goto err_flc_dma;
15547 +       }
15548 +
15549 +       dpaa2_fl_set_final(in_fle, true);
15550 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
15551 +       dpaa2_fl_set_addr(in_fle, dma_addr_in);
15552 +       dpaa2_fl_set_len(in_fle, keylen);
15553 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15554 +       dpaa2_fl_set_addr(out_fle, dma_addr_out);
15555 +       dpaa2_fl_set_len(out_fle, adata->keylen_pad);
15556 +
15557 +#ifdef DEBUG
15558 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15559 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
15560 +       print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
15561 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
15562 +#endif
15563 +
15564 +       result.err = 0;
15565 +       init_completion(&result.completion);
15566 +       result.dev = dev;
15567 +
15568 +       req_ctx->flc = flc;
15569 +       req_ctx->cbk = split_key_sh_done;
15570 +       req_ctx->ctx = &result;
15571 +
15572 +       ret = dpaa2_caam_enqueue(dev, req_ctx);
15573 +       if (ret == -EINPROGRESS) {
15574 +               /* in progress */
15575 +               wait_for_completion(&result.completion);
15576 +               ret = result.err;
15577 +#ifdef DEBUG
15578 +               print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15579 +                              DUMP_PREFIX_ADDRESS, 16, 4, key_out,
15580 +                              adata->keylen_pad, 1);
15581 +#endif
15582 +       }
15583 +
15584 +       dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
15585 +                        DMA_TO_DEVICE);
15586 +err_flc_dma:
15587 +       dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
15588 +err_dma_addr_out:
15589 +       dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
15590 +err_dma_addr_in:
15591 +       kfree(flc);
15592 +err_flc:
15593 +       kfree(req_ctx);
15594 +       return ret;
15595 +}
15596 +
15597 +static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
15598 +                             u32 authkeylen)
15599 +{
15600 +       return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
15601 +                               authkeylen);
15602 +}
15603 +
15604 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15605 +                      unsigned int keylen)
15606 +{
15607 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15608 +       struct device *dev = ctx->dev;
15609 +       struct crypto_authenc_keys keys;
15610 +       int ret;
15611 +
15612 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15613 +               goto badkey;
15614 +
15615 +#ifdef DEBUG
15616 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15617 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
15618 +               keys.authkeylen);
15619 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15620 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15621 +#endif
15622 +
15623 +       ctx->adata.keylen = split_key_len(ctx->adata.algtype &
15624 +                                         OP_ALG_ALGSEL_MASK);
15625 +       ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
15626 +                                                 OP_ALG_ALGSEL_MASK);
15627 +
15628 +#ifdef DEBUG
15629 +       dev_err(dev, "split keylen %d split keylen padded %d\n",
15630 +               ctx->adata.keylen, ctx->adata.keylen_pad);
15631 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15632 +                      DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
15633 +#endif
15634 +
15635 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15636 +               goto badkey;
15637 +
15638 +       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
15639 +       if (ret)
15640 +               goto badkey;
15641 +
15642 +       /* append encryption key to auth split key */
15643 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15644 +
15645 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
15646 +                                     keys.enckeylen, DMA_TO_DEVICE);
15647 +       if (dma_mapping_error(dev, ctx->key_dma)) {
15648 +               dev_err(dev, "unable to map key i/o memory\n");
15649 +               return -ENOMEM;
15650 +       }
15651 +#ifdef DEBUG
15652 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15653 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15654 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
15655 +#endif
15656 +
15657 +       ctx->cdata.keylen = keys.enckeylen;
15658 +
15659 +       ret = aead_set_sh_desc(aead);
15660 +       if (ret)
15661 +               dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
15662 +                                keys.enckeylen, DMA_TO_DEVICE);
15663 +
15664 +       return ret;
15665 +badkey:
15666 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15667 +       return -EINVAL;
15668 +}
15669 +
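+/*
+ * Allocate an extended descriptor from the QI cache and DMA-map everything
+ * the job needs: src/dst scatterlists (bidirectionally when they alias),
+ * the IV, the 4-byte assoclen, and the QMan S/G table that the frame list
+ * entries will point at.
+ */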
15670 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15671 +                                          bool encrypt)
15672 +{
15673 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
15674 +       struct caam_request *req_ctx = aead_request_ctx(req);
15675 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15676 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15677 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15678 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15679 +                                                typeof(*alg), aead);
15680 +       struct device *dev = ctx->dev;
15681 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15682 +                     GFP_KERNEL : GFP_ATOMIC;
15683 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15684 +       struct aead_edesc *edesc;
15685 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15686 +       int ivsize = 0;
15687 +       unsigned int authsize = ctx->authsize;
15688 +       int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15689 +       int in_len, out_len;
15690 +       struct dpaa2_sg_entry *sg_table;
15691 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15692 +
15693 +       /* allocate space for base edesc and link tables */
15694 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15695 +       if (unlikely(!edesc)) {
15696 +               dev_err(dev, "could not allocate extended descriptor\n");
15697 +               return ERR_PTR(-ENOMEM);
15698 +       }
15699 +
15700 +       if (unlikely(req->dst != req->src)) {
15701 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15702 +                                            req->cryptlen);
15703 +               if (unlikely(src_nents < 0)) {
15704 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15705 +                               req->assoclen + req->cryptlen);
15706 +                       qi_cache_free(edesc);
15707 +                       return ERR_PTR(src_nents);
15708 +               }
15709 +
15710 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15711 +                                            req->cryptlen +
15712 +                                            (encrypt ? authsize :
15713 +                                                       (-authsize)));
15714 +               if (unlikely(dst_nents < 0)) {
15715 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15716 +                               req->assoclen + req->cryptlen +
15717 +                               (encrypt ? authsize : (-authsize)));
15718 +                       qi_cache_free(edesc);
15719 +                       return ERR_PTR(dst_nents);
15720 +               }
15721 +
15722 +               if (src_nents) {
15723 +                       mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15724 +                                                     DMA_TO_DEVICE);
15725 +                       if (unlikely(!mapped_src_nents)) {
15726 +                               dev_err(dev, "unable to map source\n");
15727 +                               qi_cache_free(edesc);
15728 +                               return ERR_PTR(-ENOMEM);
15729 +                       }
15730 +               } else {
15731 +                       mapped_src_nents = 0;
15732 +               }
15733 +
15734 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15735 +                                             DMA_FROM_DEVICE);
15736 +               if (unlikely(!mapped_dst_nents)) {
15737 +                       dev_err(dev, "unable to map destination\n");
15738 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15739 +                       qi_cache_free(edesc);
15740 +                       return ERR_PTR(-ENOMEM);
15741 +               }
15742 +       } else {
15743 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15744 +                                            req->cryptlen +
15745 +                                               (encrypt ? authsize : 0));
15746 +               if (unlikely(src_nents < 0)) {
15747 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15748 +                               req->assoclen + req->cryptlen +
15749 +                               (encrypt ? authsize : 0));
15750 +                       qi_cache_free(edesc);
15751 +                       return ERR_PTR(src_nents);
15752 +               }
15753 +
15754 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15755 +                                             DMA_BIDIRECTIONAL);
15756 +               if (unlikely(!mapped_src_nents)) {
15757 +                       dev_err(dev, "unable to map source\n");
15758 +                       qi_cache_free(edesc);
15759 +                       return ERR_PTR(-ENOMEM);
15760 +               }
15761 +       }
15762 +
15763 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15764 +               ivsize = crypto_aead_ivsize(aead);
15765 +               iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15766 +               if (dma_mapping_error(dev, iv_dma)) {
15767 +                       dev_err(dev, "unable to map IV\n");
15768 +                       caam_unmap(dev, req->src, req->dst, src_nents,
15769 +                                  dst_nents, 0, 0, op_type, 0, 0);
15770 +                       qi_cache_free(edesc);
15771 +                       return ERR_PTR(-ENOMEM);
15772 +               }
15773 +       }
15774 +
15775 +       /*
15776 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15777 +        * Input is not contiguous.
15778 +        */
15779 +       qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15780 +                     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15781 +       if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15782 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15783 +                       qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15784 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15785 +                          iv_dma, ivsize, op_type, 0, 0);
15786 +               qi_cache_free(edesc);
15787 +               return ERR_PTR(-ENOMEM);
15788 +       }
15789 +       sg_table = &edesc->sgt[0];
15790 +       qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15791 +
15792 +       edesc->src_nents = src_nents;
15793 +       edesc->dst_nents = dst_nents;
15794 +       edesc->iv_dma = iv_dma;
15795 +
15796 +       edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15797 +                                            DMA_TO_DEVICE);
15798 +       if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15799 +               dev_err(dev, "unable to map assoclen\n");
15800 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15801 +                          iv_dma, ivsize, op_type, 0, 0);
15802 +               qi_cache_free(edesc);
15803 +               return ERR_PTR(-ENOMEM);
15804 +       }
15805 +
15806 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15807 +       qm_sg_index++;
15808 +       if (ivsize) {
15809 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15810 +               qm_sg_index++;
15811 +       }
15812 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15813 +       qm_sg_index += mapped_src_nents;
15814 +
15815 +       if (mapped_dst_nents > 1)
15816 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15817 +                                qm_sg_index, 0);
15818 +
15819 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15820 +       if (dma_mapping_error(dev, qm_sg_dma)) {
15821 +               dev_err(dev, "unable to map S/G table\n");
15822 +               dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15823 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15824 +                          iv_dma, ivsize, op_type, 0, 0);
15825 +               qi_cache_free(edesc);
15826 +               return ERR_PTR(-ENOMEM);
15827 +       }
15828 +
15829 +       edesc->qm_sg_dma = qm_sg_dma;
15830 +       edesc->qm_sg_bytes = qm_sg_bytes;
15831 +
15832 +       out_len = req->assoclen + req->cryptlen +
15833 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
15834 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15835 +
15836 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15837 +       dpaa2_fl_set_final(in_fle, true);
15838 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15839 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15840 +       dpaa2_fl_set_len(in_fle, in_len);
15841 +
15842 +       if (req->dst == req->src) {
15843 +               if (mapped_src_nents == 1) {
15844 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15845 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15846 +               } else {
15847 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15848 +                       dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15849 +                                         (1 + !!ivsize) * sizeof(*sg_table));
15850 +               }
15851 +       } else if (mapped_dst_nents == 1) {
15852 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15853 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15854 +       } else {
15855 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15856 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15857 +                                 sizeof(*sg_table));
15858 +       }
15859 +
15860 +       dpaa2_fl_set_len(out_fle, out_len);
15861 +
15862 +       return edesc;
15863 +}
15864 +
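+/*
+ * TLS 1.0 counterpart of aead_edesc_alloc(): on encryption the destination
+ * must also hold the CBC padding, so authsize is grown by padsize bytes,
+ * rounding cryptlen + ICV up to a full cipher block (TLS always adds at
+ * least one padding byte).
+ */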
15865 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15866 +                                        bool encrypt)
15867 +{
15868 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
15869 +       unsigned int blocksize = crypto_aead_blocksize(tls);
15870 +       unsigned int padsize, authsize;
15871 +       struct caam_request *req_ctx = aead_request_ctx(req);
15872 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15873 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15874 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
15875 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15876 +                                                typeof(*alg), aead);
15877 +       struct device *dev = ctx->dev;
15878 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15879 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15880 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15881 +       struct tls_edesc *edesc;
15882 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15883 +       int ivsize = 0;
15884 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15885 +       int in_len, out_len;
15886 +       struct dpaa2_sg_entry *sg_table;
15887 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15888 +       struct scatterlist *dst;
15889 +
15890 +       if (encrypt) {
15891 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15892 +                                       blocksize);
15893 +               authsize = ctx->authsize + padsize;
15894 +       } else {
15895 +               authsize = ctx->authsize;
15896 +       }
15897 +
15898 +       /* allocate space for base edesc and link tables */
15899 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15900 +       if (unlikely(!edesc)) {
15901 +               dev_err(dev, "could not allocate extended descriptor\n");
15902 +               return ERR_PTR(-ENOMEM);
15903 +       }
15904 +
15905 +       if (likely(req->src == req->dst)) {
15906 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15907 +                                            req->cryptlen +
15908 +                                            (encrypt ? authsize : 0));
15909 +               if (unlikely(src_nents < 0)) {
15910 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15911 +                               req->assoclen + req->cryptlen +
15912 +                               (encrypt ? authsize : 0));
15913 +                       qi_cache_free(edesc);
15914 +                       return ERR_PTR(src_nents);
15915 +               }
15916 +
15917 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15918 +                                             DMA_BIDIRECTIONAL);
15919 +               if (unlikely(!mapped_src_nents)) {
15920 +                       dev_err(dev, "unable to map source\n");
15921 +                       qi_cache_free(edesc);
15922 +                       return ERR_PTR(-ENOMEM);
15923 +               }
15924 +               dst = req->dst;
15925 +       } else {
15926 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15927 +                                            req->cryptlen);
15928 +               if (unlikely(src_nents < 0)) {
15929 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15930 +                               req->assoclen + req->cryptlen);
15931 +                       qi_cache_free(edesc);
15932 +                       return ERR_PTR(src_nents);
15933 +               }
15934 +
15935 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15936 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
15937 +                                            (encrypt ? authsize : 0));
15938 +               if (unlikely(dst_nents < 0)) {
15939 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15940 +                               req->cryptlen +
15941 +                               (encrypt ? authsize : 0));
15942 +                       qi_cache_free(edesc);
15943 +                       return ERR_PTR(dst_nents);
15944 +               }
15945 +
15946 +               if (src_nents) {
15947 +                       mapped_src_nents = dma_map_sg(dev, req->src,
15948 +                                                     src_nents, DMA_TO_DEVICE);
15949 +                       if (unlikely(!mapped_src_nents)) {
15950 +                               dev_err(dev, "unable to map source\n");
15951 +                               qi_cache_free(edesc);
15952 +                               return ERR_PTR(-ENOMEM);
15953 +                       }
15954 +               } else {
15955 +                       mapped_src_nents = 0;
15956 +               }
15957 +
15958 +               mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
15959 +                                             DMA_FROM_DEVICE);
15960 +               if (unlikely(!mapped_dst_nents)) {
15961 +                       dev_err(dev, "unable to map destination\n");
15962 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15963 +                       qi_cache_free(edesc);
15964 +                       return ERR_PTR(-ENOMEM);
15965 +               }
15966 +       }
15967 +
15968 +       ivsize = crypto_aead_ivsize(tls);
15969 +       iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15970 +       if (dma_mapping_error(dev, iv_dma)) {
15971 +               dev_err(dev, "unable to map IV\n");
15972 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
15973 +                          op_type, 0, 0);
15974 +               qi_cache_free(edesc);
15975 +               return ERR_PTR(-ENOMEM);
15976 +       }
15977 +
15978 +       /*
15979 +        * Create S/G table: IV, src, dst.
15980 +        * Input is not contiguous.
15981 +        */
15982 +       qm_sg_ents = 1 + mapped_src_nents +
15983 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15984 +       sg_table = &edesc->sgt[0];
15985 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
15986 +
15987 +       edesc->src_nents = src_nents;
15988 +       edesc->dst_nents = dst_nents;
15989 +       edesc->dst = dst;
15990 +       edesc->iv_dma = iv_dma;
15991 +
15992 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
15993 +       qm_sg_index = 1;
15994 +
15995 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15996 +       qm_sg_index += mapped_src_nents;
15997 +
15998 +       if (mapped_dst_nents > 1)
15999 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16000 +                                qm_sg_index, 0);
16001 +
16002 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16003 +       if (dma_mapping_error(dev, qm_sg_dma)) {
16004 +               dev_err(dev, "unable to map S/G table\n");
16005 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16006 +                          ivsize, op_type, 0, 0);
16007 +               qi_cache_free(edesc);
16008 +               return ERR_PTR(-ENOMEM);
16009 +       }
16010 +
16011 +       edesc->qm_sg_dma = qm_sg_dma;
16012 +       edesc->qm_sg_bytes = qm_sg_bytes;
16013 +
16014 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
16015 +       in_len = ivsize + req->assoclen + req->cryptlen;
16016 +
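+       /*
+        * Fill the frame list: in_fle describes where the engine reads its
+        * input, out_fle where it writes the result.
+        */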
16017 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16018 +       dpaa2_fl_set_final(in_fle, true);
16019 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16020 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16021 +       dpaa2_fl_set_len(in_fle, in_len);
16022 +
16023 +       if (req->dst == req->src) {
16024 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16025 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16026 +                                 (sg_nents_for_len(req->src, req->assoclen) +
16027 +                                  1) * sizeof(*sg_table));
16028 +       } else if (mapped_dst_nents == 1) {
16029 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16030 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16031 +       } else {
16032 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16033 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16034 +                                 sizeof(*sg_table));
16035 +       }
16036 +
16037 +       dpaa2_fl_set_len(out_fle, out_len);
16038 +
16039 +       return edesc;
16040 +}
16041 +
16042 +static int tls_set_sh_desc(struct crypto_aead *tls)
16043 +{
16044 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16045 +       unsigned int ivsize = crypto_aead_ivsize(tls);
16046 +       unsigned int blocksize = crypto_aead_blocksize(tls);
16047 +       struct device *dev = ctx->dev;
16048 +       struct caam_flc *flc;
16049 +       u32 *desc;
16050 +       unsigned int assoclen = 13; /* always 13 bytes for TLS: seq_num(8) + type(1) + version(2) + length(2) */
16051 +       unsigned int data_len[2];
16052 +       u32 inl_mask;
16053 +
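+       /*
+        * Defer generating the shared descriptors until both the key and
+        * the authentication size have been set.
+        */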
16054 +       if (!ctx->cdata.keylen || !ctx->authsize)
16055 +               return 0;
16056 +
16057 +       /*
16058 +        * TLS 1.0 encrypt shared descriptor
16059 +        * Job Descriptor and Shared Descriptor
16060 +        * must fit into the 64-word Descriptor h/w Buffer
16061 +        */
16062 +       data_len[0] = ctx->adata.keylen_pad;
16063 +       data_len[1] = ctx->cdata.keylen;
16064 +
16065 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16066 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
16067 +               return -EINVAL;
16068 +
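+       /*
+        * Bit 0 of inl_mask is set when the (split) authentication key fits
+        * inline in the descriptor, bit 1 when the cipher key does.
+        */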
16069 +       if (inl_mask & 1)
16070 +               ctx->adata.key_virt = ctx->key;
16071 +       else
16072 +               ctx->adata.key_dma = ctx->key_dma;
16073 +
16074 +       if (inl_mask & 2)
16075 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16076 +       else
16077 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16078 +
16079 +       ctx->adata.key_inline = !!(inl_mask & 1);
16080 +       ctx->cdata.key_inline = !!(inl_mask & 2);
16081 +
16082 +       flc = &ctx->flc[ENCRYPT];
16083 +       desc = flc->sh_desc;
16084 +
16085 +       cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16086 +                             assoclen, ivsize, ctx->authsize, blocksize);
16087 +
16088 +       flc->flc[1] = desc_len(desc);
16089 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16090 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16091 +
16092 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16093 +               dev_err(dev, "unable to map shared descriptor\n");
16094 +               return -ENOMEM;
16095 +       }
16096 +
16097 +       /*
16098 +        * TLS 1.0 decrypt shared descriptor
16099 +        * Keys do not fit inline, regardless of algorithms used
16100 +        */
16101 +       ctx->adata.key_dma = ctx->key_dma;
16102 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16103 +
16104 +       flc = &ctx->flc[DECRYPT];
16105 +       desc = flc->sh_desc;
16106 +
16107 +       cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16108 +                             ctx->authsize, blocksize);
16109 +
16110 +       flc->flc[1] = desc_len(desc); /* SDL */
16111 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16112 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16113 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16114 +               dev_err(dev, "unable to map shared descriptor\n");
16115 +               return -ENOMEM;
16116 +       }
16117 +
16118 +       return 0;
16119 +}
16120 +
16121 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16122 +                     unsigned int keylen)
16123 +{
16124 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16125 +       struct device *dev = ctx->dev;
16126 +       struct crypto_authenc_keys keys;
16127 +       int ret;
16128 +
16129 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16130 +               goto badkey;
16131 +
16132 +#ifdef DEBUG
16133 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16134 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
16135 +               keys.authkeylen);
16136 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16137 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16138 +#endif
16139 +
16140 +       ctx->adata.keylen = split_key_len(ctx->adata.algtype &
16141 +                                         OP_ALG_ALGSEL_MASK);
16142 +       ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
16143 +                                                 OP_ALG_ALGSEL_MASK);
16144 +
16145 +#ifdef DEBUG
16146 +       dev_err(dev, "split keylen %d split keylen padded %d\n",
16147 +               ctx->adata.keylen, ctx->adata.keylen_pad);
16148 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16149 +                      DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
16150 +                      keys.authkeylen + keys.enckeylen, 1);
16151 +#endif
16152 +
16153 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16154 +               goto badkey;
16155 +
16156 +       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
16157 +       if (ret)
16158 +               goto badkey;
16159 +
16160 +       /* append encryption key to auth split key */
16161 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16162 +
16163 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
16164 +                                     keys.enckeylen, DMA_TO_DEVICE);
16165 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16166 +               dev_err(dev, "unable to map key i/o memory\n");
16167 +               return -ENOMEM;
16168 +       }
16169 +#ifdef DEBUG
16170 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16171 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16172 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
16173 +#endif
16174 +
16175 +       ctx->cdata.keylen = keys.enckeylen;
16176 +
16177 +       ret = tls_set_sh_desc(tls);
16178 +       if (ret)
16179 +               dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
16180 +                                keys.enckeylen, DMA_TO_DEVICE);
16181 +
16182 +       return ret;
16183 +badkey:
16184 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16185 +       return -EINVAL;
16186 +}
16187 +
16188 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16189 +{
16190 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16191 +
16192 +       ctx->authsize = authsize;
16193 +       tls_set_sh_desc(tls);
16194 +
16195 +       return 0;
16196 +}
16197 +
16198 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16199 +{
16200 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16201 +       struct device *dev = ctx->dev;
16202 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16203 +       struct caam_flc *flc;
16204 +       u32 *desc;
16205 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16206 +                       ctx->cdata.keylen;
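+       /*
+        * rem_bytes is the descriptor space left after accounting for the
+        * job descriptor I/O overhead and an inlined key; it decides
+        * whether the key can be inlined or must be referenced by DMA.
+        */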
16207 +
16208 +       if (!ctx->cdata.keylen || !ctx->authsize)
16209 +               return 0;
16210 +
16211 +       /*
16212 +        * AES GCM encrypt shared descriptor
16213 +        * Job Descriptor and Shared Descriptor
16214 +        * must fit into the 64-word Descriptor h/w Buffer
16215 +        */
16216 +       if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16217 +               ctx->cdata.key_inline = true;
16218 +               ctx->cdata.key_virt = ctx->key;
16219 +       } else {
16220 +               ctx->cdata.key_inline = false;
16221 +               ctx->cdata.key_dma = ctx->key_dma;
16222 +       }
16223 +
16224 +       flc = &ctx->flc[ENCRYPT];
16225 +       desc = flc->sh_desc;
16226 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16227 +
16228 +       flc->flc[1] = desc_len(desc); /* SDL */
16229 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16230 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16231 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16232 +               dev_err(dev, "unable to map shared descriptor\n");
16233 +               return -ENOMEM;
16234 +       }
16235 +
16236 +       /*
16237 +        * Job Descriptor and Shared Descriptors
16238 +        * must all fit into the 64-word Descriptor h/w Buffer
16239 +        */
16240 +       if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16241 +               ctx->cdata.key_inline = true;
16242 +               ctx->cdata.key_virt = ctx->key;
16243 +       } else {
16244 +               ctx->cdata.key_inline = false;
16245 +               ctx->cdata.key_dma = ctx->key_dma;
16246 +       }
16247 +
16248 +       flc = &ctx->flc[DECRYPT];
16249 +       desc = flc->sh_desc;
16250 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16251 +
16252 +       flc->flc[1] = desc_len(desc); /* SDL */
16253 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16254 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16255 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16256 +               dev_err(dev, "unable to map shared descriptor\n");
16257 +               return -ENOMEM;
16258 +       }
16259 +
16260 +       return 0;
16261 +}
16262 +
16263 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16264 +{
16265 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16266 +
16267 +       ctx->authsize = authsize;
16268 +       gcm_set_sh_desc(authenc);
16269 +
16270 +       return 0;
16271 +}
16272 +
16273 +static int gcm_setkey(struct crypto_aead *aead,
16274 +                     const u8 *key, unsigned int keylen)
16275 +{
16276 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16277 +       struct device *dev = ctx->dev;
16278 +       int ret;
16279 +
16280 +#ifdef DEBUG
16281 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16282 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16283 +#endif
16284 +
16285 +       memcpy(ctx->key, key, keylen);
16286 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16287 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16288 +               dev_err(dev, "unable to map key i/o memory\n");
16289 +               return -ENOMEM;
16290 +       }
16291 +       ctx->cdata.keylen = keylen;
16292 +
16293 +       ret = gcm_set_sh_desc(aead);
16294 +       if (ret)
16295 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16296 +                                DMA_TO_DEVICE);
16297 +
16298 +       return ret;
16299 +}
16300 +
16301 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16302 +{
16303 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16304 +       struct device *dev = ctx->dev;
16305 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16306 +       struct caam_flc *flc;
16307 +       u32 *desc;
16308 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16309 +                       ctx->cdata.keylen;
16310 +
16311 +       if (!ctx->cdata.keylen || !ctx->authsize)
16312 +               return 0;
16313 +
16314 +       ctx->cdata.key_virt = ctx->key;
16315 +
16316 +       /*
16317 +        * RFC4106 encrypt shared descriptor
16318 +        * Job Descriptor and Shared Descriptor
16319 +        * must fit into the 64-word Descriptor h/w Buffer
16320 +        */
16321 +       if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16322 +               ctx->cdata.key_inline = true;
16323 +       } else {
16324 +               ctx->cdata.key_inline = false;
16325 +               ctx->cdata.key_dma = ctx->key_dma;
16326 +       }
16327 +
16328 +       flc = &ctx->flc[ENCRYPT];
16329 +       desc = flc->sh_desc;
16330 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16331 +                                 true);
16332 +
16333 +       flc->flc[1] = desc_len(desc); /* SDL */
16334 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16335 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16336 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16337 +               dev_err(dev, "unable to map shared descriptor\n");
16338 +               return -ENOMEM;
16339 +       }
16340 +
16341 +       /*
16342 +        * Job Descriptor and Shared Descriptors
16343 +        * must all fit into the 64-word Descriptor h/w Buffer
16344 +        */
16345 +       if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16346 +               ctx->cdata.key_inline = true;
16347 +       } else {
16348 +               ctx->cdata.key_inline = false;
16349 +               ctx->cdata.key_dma = ctx->key_dma;
16350 +       }
16351 +
16352 +       flc = &ctx->flc[DECRYPT];
16353 +       desc = flc->sh_desc;
16354 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16355 +                                 true);
16356 +
16357 +       flc->flc[1] = desc_len(desc); /* SDL */
16358 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16359 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16360 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16361 +               dev_err(dev, "unable to map shared descriptor\n");
16362 +               return -ENOMEM;
16363 +       }
16364 +
16365 +       return 0;
16366 +}
16367 +
16368 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16369 +                              unsigned int authsize)
16370 +{
16371 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16372 +
16373 +       ctx->authsize = authsize;
16374 +       rfc4106_set_sh_desc(authenc);
16375 +
16376 +       return 0;
16377 +}
16378 +
16379 +static int rfc4106_setkey(struct crypto_aead *aead,
16380 +                         const u8 *key, unsigned int keylen)
16381 +{
16382 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16383 +       struct device *dev = ctx->dev;
16384 +       int ret;
16385 +
16386 +       if (keylen < 4)
16387 +               return -EINVAL;
16388 +
16389 +#ifdef DEBUG
16390 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16391 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16392 +#endif
16393 +
16394 +       memcpy(ctx->key, key, keylen);
16395 +       /*
16396 +        * The last four bytes of the key material are used as the salt value
16397 +        * in the nonce. Update the AES key length.
16398 +        */
16399 +       ctx->cdata.keylen = keylen - 4;
16400 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16401 +                                     DMA_TO_DEVICE);
16402 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16403 +               dev_err(dev, "unable to map key i/o memory\n");
16404 +               return -ENOMEM;
16405 +       }
16406 +
16407 +       ret = rfc4106_set_sh_desc(aead);
16408 +       if (ret)
16409 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16410 +                                DMA_TO_DEVICE);
16411 +
16412 +       return ret;
16413 +}
16414 +
16415 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16416 +{
16417 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16418 +       struct device *dev = ctx->dev;
16419 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16420 +       struct caam_flc *flc;
16421 +       u32 *desc;
16422 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16423 +                       ctx->cdata.keylen;
16424 +
16425 +       if (!ctx->cdata.keylen || !ctx->authsize)
16426 +               return 0;
16427 +
16428 +       ctx->cdata.key_virt = ctx->key;
16429 +
16430 +       /*
16431 +        * RFC4543 encrypt shared descriptor
16432 +        * Job Descriptor and Shared Descriptor
16433 +        * must fit into the 64-word Descriptor h/w Buffer
16434 +        */
16435 +       if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16436 +               ctx->cdata.key_inline = true;
16437 +       } else {
16438 +               ctx->cdata.key_inline = false;
16439 +               ctx->cdata.key_dma = ctx->key_dma;
16440 +       }
16441 +
16442 +       flc = &ctx->flc[ENCRYPT];
16443 +       desc = flc->sh_desc;
16444 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16445 +                                 true);
16446 +
16447 +       flc->flc[1] = desc_len(desc); /* SDL */
16448 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16449 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16450 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16451 +               dev_err(dev, "unable to map shared descriptor\n");
16452 +               return -ENOMEM;
16453 +       }
16454 +
16455 +       /*
16456 +        * Job Descriptor and Shared Descriptors
16457 +        * must all fit into the 64-word Descriptor h/w Buffer
16458 +        */
16459 +       if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16460 +               ctx->cdata.key_inline = true;
16461 +       } else {
16462 +               ctx->cdata.key_inline = false;
16463 +               ctx->cdata.key_dma = ctx->key_dma;
16464 +       }
16465 +
16466 +       flc = &ctx->flc[DECRYPT];
16467 +       desc = flc->sh_desc;
16468 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16469 +                                 true);
16470 +
16471 +       flc->flc[1] = desc_len(desc); /* SDL */
16472 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16473 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16474 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16475 +               dev_err(dev, "unable to map shared descriptor\n");
16476 +               return -ENOMEM;
16477 +       }
16478 +
16479 +       return 0;
16480 +}
16481 +
16482 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16483 +                              unsigned int authsize)
16484 +{
16485 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16486 +
16487 +       ctx->authsize = authsize;
16488 +       rfc4543_set_sh_desc(authenc);
16489 +
16490 +       return 0;
16491 +}
16492 +
16493 +static int rfc4543_setkey(struct crypto_aead *aead,
16494 +                         const u8 *key, unsigned int keylen)
16495 +{
16496 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16497 +       struct device *dev = ctx->dev;
16498 +       int ret;
16499 +
16500 +       if (keylen < 4)
16501 +               return -EINVAL;
16502 +
16503 +#ifdef DEBUG
16504 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16505 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16506 +#endif
16507 +
16508 +       memcpy(ctx->key, key, keylen);
16509 +       /*
16510 +        * The last four bytes of the key material are used as the salt value
16511 +        * in the nonce. Update the AES key length.
16512 +        */
16513 +       ctx->cdata.keylen = keylen - 4;
16514 +       ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16515 +                                     DMA_TO_DEVICE);
16516 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16517 +               dev_err(dev, "unable to map key i/o memory\n");
16518 +               return -ENOMEM;
16519 +       }
16520 +
16521 +       ret = rfc4543_set_sh_desc(aead);
16522 +       if (ret)
16523 +               dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16524 +                                DMA_TO_DEVICE);
16525 +
16526 +       return ret;
16527 +}
16528 +
16529 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16530 +                            const u8 *key, unsigned int keylen)
16531 +{
16532 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16533 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16534 +       const char *alg_name = crypto_tfm_alg_name(tfm);
16535 +       struct device *dev = ctx->dev;
16536 +       struct caam_flc *flc;
16537 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16538 +       u32 *desc;
16539 +       u32 ctx1_iv_off = 0;
16540 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16541 +                              OP_ALG_AAI_CTR_MOD128);
16542 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16543 +
16544 +       memcpy(ctx->key, key, keylen);
16545 +#ifdef DEBUG
16546 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16547 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16548 +#endif
16549 +       /*
16550 +        * AES-CTR needs to load the IV into the CONTEXT1 register
16551 +        * at an offset of 128 bits (16 bytes):
16552 +        * CONTEXT1[255:128] = IV
16553 +        */
16554 +       if (ctr_mode)
16555 +               ctx1_iv_off = 16;
16556 +
16557 +       /*
16558 +        * RFC3686 specific:
16559 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16560 +        *      | *key = {KEY, NONCE}
16561 +        */
16562 +       if (is_rfc3686) {
16563 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16564 +               keylen -= CTR_RFC3686_NONCE_SIZE;
16565 +       }
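+       /*
+        * The trailing nonce stays in ctx->key right after the AES key;
+        * only the AES key length is reported in cdata.keylen.
+        */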
16566 +
16567 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16568 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16569 +               dev_err(dev, "unable to map key i/o memory\n");
16570 +               return -ENOMEM;
16571 +       }
16572 +       ctx->cdata.keylen = keylen;
16573 +       ctx->cdata.key_virt = ctx->key;
16574 +       ctx->cdata.key_inline = true;
16575 +
16576 +       /* ablkcipher_encrypt shared descriptor */
16577 +       flc = &ctx->flc[ENCRYPT];
16578 +       desc = flc->sh_desc;
16579 +
16580 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16581 +                                    is_rfc3686, ctx1_iv_off);
16582 +
16583 +       flc->flc[1] = desc_len(desc); /* SDL */
16584 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16585 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16586 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16587 +               dev_err(dev, "unable to map shared descriptor\n");
16588 +               return -ENOMEM;
16589 +       }
16590 +
16591 +       /* ablkcipher_decrypt shared descriptor */
16592 +       flc = &ctx->flc[DECRYPT];
16593 +       desc = flc->sh_desc;
16594 +
16595 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16596 +                                    is_rfc3686, ctx1_iv_off);
16597 +
16598 +       flc->flc[1] = desc_len(desc); /* SDL */
16599 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16600 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16601 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16602 +               dev_err(dev, "unable to map shared descriptor\n");
16603 +               return -ENOMEM;
16604 +       }
16605 +
16606 +       /* ablkcipher_givencrypt shared descriptor */
16607 +       flc = &ctx->flc[GIVENCRYPT];
16608 +       desc = flc->sh_desc;
16609 +
16610 +       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16611 +                                       ivsize, is_rfc3686, ctx1_iv_off);
16612 +
16613 +       flc->flc[1] = desc_len(desc); /* SDL */
16614 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16615 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16616 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16617 +               dev_err(dev, "unable to map shared descriptor\n");
16618 +               return -ENOMEM;
16619 +       }
16620 +
16621 +       return 0;
16622 +}
16623 +
16624 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16625 +                                const u8 *key, unsigned int keylen)
16626 +{
16627 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16628 +       struct device *dev = ctx->dev;
16629 +       struct caam_flc *flc;
16630 +       u32 *desc;
16631 +
16632 +       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
16633 +               dev_err(dev, "key size mismatch\n");
16634 +               crypto_ablkcipher_set_flags(ablkcipher,
16635 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
16636 +               return -EINVAL;
16637 +       }
16638 +
16639 +       memcpy(ctx->key, key, keylen);
16640 +       ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16641 +       if (dma_mapping_error(dev, ctx->key_dma)) {
16642 +               dev_err(dev, "unable to map key i/o memory\n");
16643 +               return -ENOMEM;
16644 +       }
16645 +       ctx->cdata.keylen = keylen;
16646 +       ctx->cdata.key_virt = ctx->key;
16647 +       ctx->cdata.key_inline = true;
16648 +
16649 +       /* xts_ablkcipher_encrypt shared descriptor */
16650 +       flc = &ctx->flc[ENCRYPT];
16651 +       desc = flc->sh_desc;
16652 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16653 +
16654 +       flc->flc[1] = desc_len(desc); /* SDL */
16655 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16656 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16657 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16658 +               dev_err(dev, "unable to map shared descriptor\n");
16659 +               return -ENOMEM;
16660 +       }
16661 +
16662 +       /* xts_ablkcipher_decrypt shared descriptor */
16663 +       flc = &ctx->flc[DECRYPT];
16664 +       desc = flc->sh_desc;
16665 +
16666 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16667 +
16668 +       flc->flc[1] = desc_len(desc); /* SDL */
16669 +       flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16670 +                                     desc_bytes(desc), DMA_TO_DEVICE);
16671 +       if (dma_mapping_error(dev, flc->flc_dma)) {
16672 +               dev_err(dev, "unable to map shared descriptor\n");
16673 +               return -ENOMEM;
16674 +       }
16675 +
16676 +       return 0;
16677 +}
16678 +
16679 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16680 +                                                      *req, bool encrypt)
16681 +{
16682 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16683 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16684 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16685 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16686 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16687 +       struct device *dev = ctx->dev;
16688 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16689 +                      GFP_KERNEL : GFP_ATOMIC;
16690 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16691 +       struct ablkcipher_edesc *edesc;
16692 +       dma_addr_t iv_dma;
16693 +       bool in_contig;
16694 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16695 +       int dst_sg_idx, qm_sg_ents;
16696 +       struct dpaa2_sg_entry *sg_table;
16697 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16698 +
16699 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16700 +       if (unlikely(src_nents < 0)) {
16701 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16702 +                       req->nbytes);
16703 +               return ERR_PTR(src_nents);
16704 +       }
16705 +
16706 +       if (unlikely(req->dst != req->src)) {
16707 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16708 +               if (unlikely(dst_nents < 0)) {
16709 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16710 +                               req->nbytes);
16711 +                       return ERR_PTR(dst_nents);
16712 +               }
16713 +
16714 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16715 +                                             DMA_TO_DEVICE);
16716 +               if (unlikely(!mapped_src_nents)) {
16717 +                       dev_err(dev, "unable to map source\n");
16718 +                       return ERR_PTR(-ENOMEM);
16719 +               }
16720 +
16721 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16722 +                                             DMA_FROM_DEVICE);
16723 +               if (unlikely(!mapped_dst_nents)) {
16724 +                       dev_err(dev, "unable to map destination\n");
16725 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16726 +                       return ERR_PTR(-ENOMEM);
16727 +               }
16728 +       } else {
16729 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16730 +                                             DMA_BIDIRECTIONAL);
16731 +               if (unlikely(!mapped_src_nents)) {
16732 +                       dev_err(dev, "unable to map source\n");
16733 +                       return ERR_PTR(-ENOMEM);
16734 +               }
16735 +       }
16736 +
16737 +       iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16738 +       if (dma_mapping_error(dev, iv_dma)) {
16739 +               dev_err(dev, "unable to map IV\n");
16740 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16741 +                          0, 0, 0, 0);
16742 +               return ERR_PTR(-ENOMEM);
16743 +       }
16744 +
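+       /*
+        * If the IV buffer is immediately followed by a single contiguous
+        * source mapping, the engine can read IV and payload as one buffer
+        * and no input S/G table is needed.
+        */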
16745 +       if (mapped_src_nents == 1 &&
16746 +           iv_dma + ivsize == sg_dma_address(req->src)) {
16747 +               in_contig = true;
16748 +               qm_sg_ents = 0;
16749 +       } else {
16750 +               in_contig = false;
16751 +               qm_sg_ents = 1 + mapped_src_nents;
16752 +       }
16753 +       dst_sg_idx = qm_sg_ents;
16754 +
16755 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16756 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16757 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16758 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16759 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16760 +                          iv_dma, ivsize, op_type, 0, 0);
16761 +               return ERR_PTR(-ENOMEM);
16762 +       }
16763 +
16764 +       /* allocate space for base edesc and link tables */
16765 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16766 +       if (unlikely(!edesc)) {
16767 +               dev_err(dev, "could not allocate extended descriptor\n");
16768 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16769 +                          iv_dma, ivsize, op_type, 0, 0);
16770 +               return ERR_PTR(-ENOMEM);
16771 +       }
16772 +
16773 +       edesc->src_nents = src_nents;
16774 +       edesc->dst_nents = dst_nents;
16775 +       edesc->iv_dma = iv_dma;
16776 +       sg_table = &edesc->sgt[0];
16777 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16778 +
16779 +       if (!in_contig) {
16780 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16781 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16782 +       }
16783 +
16784 +       if (mapped_dst_nents > 1)
16785 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16786 +                                dst_sg_idx, 0);
16787 +
16788 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16789 +                                         DMA_TO_DEVICE);
16790 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16791 +               dev_err(dev, "unable to map S/G table\n");
16792 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16793 +                          iv_dma, ivsize, op_type, 0, 0);
16794 +               qi_cache_free(edesc);
16795 +               return ERR_PTR(-ENOMEM);
16796 +       }
16797 +
16798 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16799 +       dpaa2_fl_set_final(in_fle, true);
16800 +       dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16801 +       dpaa2_fl_set_len(out_fle, req->nbytes);
16802 +
16803 +       if (!in_contig) {
16804 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16805 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16806 +       } else {
16807 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16808 +               dpaa2_fl_set_addr(in_fle, iv_dma);
16809 +       }
16810 +
16811 +       if (req->src == req->dst) {
16812 +               if (!in_contig) {
16813 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16814 +                       dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16815 +                                         sizeof(*sg_table));
16816 +               } else {
16817 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16818 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16819 +               }
16820 +       } else if (mapped_dst_nents > 1) {
16821 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16822 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16823 +                                 sizeof(*sg_table));
16824 +       } else {
16825 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16826 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16827 +       }
16828 +
16829 +       return edesc;
16830 +}
16831 +
16832 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16833 +       struct skcipher_givcrypt_request *greq)
16834 +{
16835 +       struct ablkcipher_request *req = &greq->creq;
16836 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16837 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16838 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16839 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16840 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16841 +       struct device *dev = ctx->dev;
16842 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16843 +                      GFP_KERNEL : GFP_ATOMIC;
16844 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16845 +       struct ablkcipher_edesc *edesc;
16846 +       dma_addr_t iv_dma;
16847 +       bool out_contig;
16848 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16849 +       struct dpaa2_sg_entry *sg_table;
16850 +       int dst_sg_idx, qm_sg_ents;
16851 +
16852 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16853 +       if (unlikely(src_nents < 0)) {
16854 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16855 +                       req->nbytes);
16856 +               return ERR_PTR(src_nents);
16857 +       }
16858 +
16859 +       if (unlikely(req->dst != req->src)) {
16860 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16861 +               if (unlikely(dst_nents < 0)) {
16862 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16863 +                               req->nbytes);
16864 +                       return ERR_PTR(dst_nents);
16865 +               }
16866 +
16867 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16868 +                                             DMA_TO_DEVICE);
16869 +               if (unlikely(!mapped_src_nents)) {
16870 +                       dev_err(dev, "unable to map source\n");
16871 +                       return ERR_PTR(-ENOMEM);
16872 +               }
16873 +
16874 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16875 +                                             DMA_FROM_DEVICE);
16876 +               if (unlikely(!mapped_dst_nents)) {
16877 +                       dev_err(dev, "unable to map destination\n");
16878 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16879 +                       return ERR_PTR(-ENOMEM);
16880 +               }
16881 +       } else {
16882 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16883 +                                             DMA_BIDIRECTIONAL);
16884 +               if (unlikely(!mapped_src_nents)) {
16885 +                       dev_err(dev, "unable to map source\n");
16886 +                       return ERR_PTR(-ENOMEM);
16887 +               }
16888 +
16889 +               dst_nents = src_nents;
16890 +               mapped_dst_nents = src_nents;
16891 +       }
16892 +
16893 +       iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16894 +       if (dma_mapping_error(dev, iv_dma)) {
16895 +               dev_err(dev, "unable to map IV\n");
16896 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16897 +                          0, 0, 0, 0);
16898 +               return ERR_PTR(-ENOMEM);
16899 +       }
16900 +
16901 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16902 +       dst_sg_idx = qm_sg_ents;
16903 +       if (mapped_dst_nents == 1 &&
16904 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
16905 +               out_contig = true;
16906 +       } else {
16907 +               out_contig = false;
16908 +               qm_sg_ents += 1 + mapped_dst_nents;
16909 +       }
16910 +
16911 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16912 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16913 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16914 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16915 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16916 +               return ERR_PTR(-ENOMEM);
16917 +       }
16918 +
16919 +       /* allocate space for base edesc and link tables */
16920 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16921 +       if (!edesc) {
16922 +               dev_err(dev, "could not allocate extended descriptor\n");
16923 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16924 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16925 +               return ERR_PTR(-ENOMEM);
16926 +       }
16927 +
16928 +       edesc->src_nents = src_nents;
16929 +       edesc->dst_nents = dst_nents;
16930 +       edesc->iv_dma = iv_dma;
16931 +       sg_table = &edesc->sgt[0];
16932 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16933 +
16934 +       if (mapped_src_nents > 1)
16935 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16936 +
16937 +       if (!out_contig) {
16938 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16939 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16940 +                                dst_sg_idx + 1, 0);
16941 +       }
16942 +
16943 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16944 +                                         DMA_TO_DEVICE);
16945 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16946 +               dev_err(dev, "unable to map S/G table\n");
16947 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16948 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16949 +               qi_cache_free(edesc);
16950 +               return ERR_PTR(-ENOMEM);
16951 +       }
16952 +
16953 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16954 +       dpaa2_fl_set_final(in_fle, true);
16955 +       dpaa2_fl_set_len(in_fle, req->nbytes);
16956 +       dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16957 +
16958 +       if (mapped_src_nents > 1) {
16959 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16960 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16961 +       } else {
16962 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16963 +               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16964 +       }
16965 +
16966 +       if (!out_contig) {
16967 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16968 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16969 +                                 sizeof(*sg_table));
16970 +       } else {
16971 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16972 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16973 +       }
16974 +
16975 +       return edesc;
16976 +}
16977 +
16978 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16979 +                      struct aead_request *req)
16980 +{
16981 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
16982 +       int ivsize = crypto_aead_ivsize(aead);
16983 +       struct caam_request *caam_req = aead_request_ctx(req);
16984 +
16985 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16986 +                  edesc->iv_dma, ivsize, caam_req->op_type,
16987 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
16988 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16989 +}
16990 +
16991 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16992 +                     struct aead_request *req)
16993 +{
16994 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
16995 +       int ivsize = crypto_aead_ivsize(tls);
16996 +       struct caam_request *caam_req = aead_request_ctx(req);
16997 +
16998 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16999 +                  edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
17000 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
17001 +}
17002 +
17003 +static void ablkcipher_unmap(struct device *dev,
17004 +                            struct ablkcipher_edesc *edesc,
17005 +                            struct ablkcipher_request *req)
17006 +{
17007 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17008 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17009 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17010 +
17011 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17012 +                  edesc->iv_dma, ivsize, caam_req->op_type,
17013 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
17014 +}
17015 +
17016 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17017 +{
17018 +       struct crypto_async_request *areq = cbk_ctx;
17019 +       struct aead_request *req = container_of(areq, struct aead_request,
17020 +                                               base);
17021 +       struct caam_request *req_ctx = to_caam_req(areq);
17022 +       struct aead_edesc *edesc = req_ctx->edesc;
17023 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17024 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17025 +       int ecode = 0;
17026 +
17027 +#ifdef DEBUG
17028 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17029 +#endif
17030 +
17031 +       if (unlikely(status)) {
17032 +               caam_qi2_strstatus(ctx->dev, status);
17033 +               ecode = -EIO;
17034 +       }
17035 +
17036 +       aead_unmap(ctx->dev, edesc, req);
17037 +       qi_cache_free(edesc);
17038 +       aead_request_complete(req, ecode);
17039 +}
17040 +
17041 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17042 +{
17043 +       struct crypto_async_request *areq = cbk_ctx;
17044 +       struct aead_request *req = container_of(areq, struct aead_request,
17045 +                                               base);
17046 +       struct caam_request *req_ctx = to_caam_req(areq);
17047 +       struct aead_edesc *edesc = req_ctx->edesc;
17048 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17049 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17050 +       int ecode = 0;
17051 +
17052 +#ifdef DEBUG
17053 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17054 +#endif
17055 +
17056 +       if (unlikely(status)) {
17057 +               caam_qi2_strstatus(ctx->dev, status);
17058 +               /*
17059 +                * Report -EBADMSG if the HW auth (ICV) check failed
17060 +                */
17061 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17062 +                    JRSTA_CCBERR_ERRID_ICVCHK)
17063 +                       ecode = -EBADMSG;
17064 +               else
17065 +                       ecode = -EIO;
17066 +       }
17067 +
17068 +       aead_unmap(ctx->dev, edesc, req);
17069 +       qi_cache_free(edesc);
17070 +       aead_request_complete(req, ecode);
17071 +}
17072 +
17073 +static int aead_encrypt(struct aead_request *req)
17074 +{
17075 +       struct aead_edesc *edesc;
17076 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17077 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17078 +       struct caam_request *caam_req = aead_request_ctx(req);
17079 +       int ret;
17080 +
17081 +       /* allocate extended descriptor */
17082 +       edesc = aead_edesc_alloc(req, true);
17083 +       if (IS_ERR(edesc))
17084 +               return PTR_ERR(edesc);
17085 +
17086 +       caam_req->flc = &ctx->flc[ENCRYPT];
17087 +       caam_req->op_type = ENCRYPT;
17088 +       caam_req->cbk = aead_encrypt_done;
17089 +       caam_req->ctx = &req->base;
17090 +       caam_req->edesc = edesc;
17091 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
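+       /*
+        * -EINPROGRESS (or -EBUSY with MAY_BACKLOG set) means the engine
+        * now owns the request; anything else means it was never enqueued
+        * and must be unmapped and freed here.
+        */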
17092 +       if (ret != -EINPROGRESS &&
17093 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17094 +               aead_unmap(ctx->dev, edesc, req);
17095 +               qi_cache_free(edesc);
17096 +       }
17097 +
17098 +       return ret;
17099 +}
17100 +
17101 +static int aead_decrypt(struct aead_request *req)
17102 +{
17103 +       struct aead_edesc *edesc;
17104 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17105 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17106 +       struct caam_request *caam_req = aead_request_ctx(req);
17107 +       int ret;
17108 +
17109 +       /* allocate extended descriptor */
17110 +       edesc = aead_edesc_alloc(req, false);
17111 +       if (IS_ERR(edesc))
17112 +               return PTR_ERR(edesc);
17113 +
17114 +       caam_req->flc = &ctx->flc[DECRYPT];
17115 +       caam_req->op_type = DECRYPT;
17116 +       caam_req->cbk = aead_decrypt_done;
17117 +       caam_req->ctx = &req->base;
17118 +       caam_req->edesc = edesc;
17119 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17120 +       if (ret != -EINPROGRESS &&
17121 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17122 +               aead_unmap(ctx->dev, edesc, req);
17123 +               qi_cache_free(edesc);
17124 +       }
17125 +
17126 +       return ret;
17127 +}
17128 +
17129 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17130 +{
17131 +       struct crypto_async_request *areq = cbk_ctx;
17132 +       struct aead_request *req = container_of(areq, struct aead_request,
17133 +                                               base);
17134 +       struct caam_request *req_ctx = to_caam_req(areq);
17135 +       struct tls_edesc *edesc = req_ctx->edesc;
17136 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17137 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17138 +       int ecode = 0;
17139 +
17140 +#ifdef DEBUG
17141 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17142 +#endif
17143 +
17144 +       if (unlikely(status)) {
17145 +               caam_qi2_strstatus(ctx->dev, status);
17146 +               ecode = -EIO;
17147 +       }
17148 +
17149 +       tls_unmap(ctx->dev, edesc, req);
17150 +       qi_cache_free(edesc);
17151 +       aead_request_complete(req, ecode);
17152 +}
17153 +
17154 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17155 +{
17156 +       struct crypto_async_request *areq = cbk_ctx;
17157 +       struct aead_request *req = container_of(areq, struct aead_request,
17158 +                                               base);
17159 +       struct caam_request *req_ctx = to_caam_req(areq);
17160 +       struct tls_edesc *edesc = req_ctx->edesc;
17161 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17162 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17163 +       int ecode = 0;
17164 +
17165 +#ifdef DEBUG
17166 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17167 +#endif
17168 +
17169 +       if (unlikely(status)) {
17170 +               caam_qi2_strstatus(ctx->dev, status);
17171 +               /*
17172 +                * Report -EBADMSG if the HW auth (ICV) check failed
17173 +                */
17174 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17175 +                    JRSTA_CCBERR_ERRID_ICVCHK)
17176 +                       ecode = -EBADMSG;
17177 +               else
17178 +                       ecode = -EIO;
17179 +       }
17180 +
17181 +       tls_unmap(ctx->dev, edesc, req);
17182 +       qi_cache_free(edesc);
17183 +       aead_request_complete(req, ecode);
17184 +}
17185 +
17186 +static int tls_encrypt(struct aead_request *req)
17187 +{
17188 +       struct tls_edesc *edesc;
17189 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17190 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17191 +       struct caam_request *caam_req = aead_request_ctx(req);
17192 +       int ret;
17193 +
17194 +       /* allocate extended descriptor */
17195 +       edesc = tls_edesc_alloc(req, true);
17196 +       if (IS_ERR(edesc))
17197 +               return PTR_ERR(edesc);
17198 +
17199 +       caam_req->flc = &ctx->flc[ENCRYPT];
17200 +       caam_req->op_type = ENCRYPT;
17201 +       caam_req->cbk = tls_encrypt_done;
17202 +       caam_req->ctx = &req->base;
17203 +       caam_req->edesc = edesc;
17204 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17205 +       if (ret != -EINPROGRESS &&
17206 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17207 +               tls_unmap(ctx->dev, edesc, req);
17208 +               qi_cache_free(edesc);
17209 +       }
17210 +
17211 +       return ret;
17212 +}
17213 +
17214 +static int tls_decrypt(struct aead_request *req)
17215 +{
17216 +       struct tls_edesc *edesc;
17217 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17218 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17219 +       struct caam_request *caam_req = aead_request_ctx(req);
17220 +       int ret;
17221 +
17222 +       /* allocate extended descriptor */
17223 +       edesc = tls_edesc_alloc(req, false);
17224 +       if (IS_ERR(edesc))
17225 +               return PTR_ERR(edesc);
17226 +
17227 +       caam_req->flc = &ctx->flc[DECRYPT];
17228 +       caam_req->op_type = DECRYPT;
17229 +       caam_req->cbk = tls_decrypt_done;
17230 +       caam_req->ctx = &req->base;
17231 +       caam_req->edesc = edesc;
17232 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17233 +       if (ret != -EINPROGRESS &&
17234 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17235 +               tls_unmap(ctx->dev, edesc, req);
17236 +               qi_cache_free(edesc);
17237 +       }
17238 +
17239 +       return ret;
17240 +}
17241 +
17242 +static int ipsec_gcm_encrypt(struct aead_request *req)
17243 +{
17244 +       if (req->assoclen < 8)
17245 +               return -EINVAL;
17246 +
17247 +       return aead_encrypt(req);
17248 +}
17249 +
17250 +static int ipsec_gcm_decrypt(struct aead_request *req)
17251 +{
17252 +       if (req->assoclen < 8)
17253 +               return -EINVAL;
17254 +
17255 +       return aead_decrypt(req);
17256 +}
17257 +
17258 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17259 +{
17260 +       struct crypto_async_request *areq = cbk_ctx;
17261 +       struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17262 +       struct caam_request *req_ctx = to_caam_req(areq);
17263 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17264 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17265 +       struct ablkcipher_edesc *edesc = req_ctx->edesc;
17266 +       int ecode = 0;
17267 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17268 +
17269 +#ifdef DEBUG
17270 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17271 +#endif
17272 +
17273 +       if (unlikely(status)) {
17274 +               caam_qi2_strstatus(ctx->dev, status);
17275 +               ecode = -EIO;
17276 +       }
17277 +
17278 +#ifdef DEBUG
17279 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
17280 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17281 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
17282 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
17283 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17284 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17285 +#endif
17286 +
17287 +       ablkcipher_unmap(ctx->dev, edesc, req);
17288 +       qi_cache_free(edesc);
17289 +
17290 +       /*
17291 +        * The crypto API expects us to set the IV (req->info) to the last
17292 +        * ciphertext block. This is used e.g. by the CTS mode.
17293 +        */
17294 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17295 +                                ivsize, 0);
17296 +
17297 +       ablkcipher_request_complete(req, ecode);
17298 +}
17299 +
17300 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17301 +{
17302 +       struct ablkcipher_edesc *edesc;
17303 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17304 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17305 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17306 +       int ret;
17307 +
17308 +       /* allocate extended descriptor */
17309 +       edesc = ablkcipher_edesc_alloc(req, true);
17310 +       if (IS_ERR(edesc))
17311 +               return PTR_ERR(edesc);
17312 +
17313 +       caam_req->flc = &ctx->flc[ENCRYPT];
17314 +       caam_req->op_type = ENCRYPT;
17315 +       caam_req->cbk = ablkcipher_done;
17316 +       caam_req->ctx = &req->base;
17317 +       caam_req->edesc = edesc;
17318 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17319 +       if (ret != -EINPROGRESS &&
17320 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17321 +               ablkcipher_unmap(ctx->dev, edesc, req);
17322 +               qi_cache_free(edesc);
17323 +       }
17324 +
17325 +       return ret;
17326 +}
17327 +
17328 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17329 +{
17330 +       struct ablkcipher_request *req = &greq->creq;
17331 +       struct ablkcipher_edesc *edesc;
17332 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17333 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17334 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17335 +       int ret;
17336 +
17337 +       /* allocate extended descriptor */
17338 +       edesc = ablkcipher_giv_edesc_alloc(greq);
17339 +       if (IS_ERR(edesc))
17340 +               return PTR_ERR(edesc);
17341 +
17342 +       caam_req->flc = &ctx->flc[GIVENCRYPT];
17343 +       caam_req->op_type = GIVENCRYPT;
17344 +       caam_req->cbk = ablkcipher_done;
17345 +       caam_req->ctx = &req->base;
17346 +       caam_req->edesc = edesc;
17347 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17348 +       if (ret != -EINPROGRESS &&
17349 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17350 +               ablkcipher_unmap(ctx->dev, edesc, req);
17351 +               qi_cache_free(edesc);
17352 +       }
17353 +
17354 +       return ret;
17355 +}
17356 +
17357 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17358 +{
17359 +       struct ablkcipher_edesc *edesc;
17360 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17361 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17362 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17363 +       int ret;
17364 +
17365 +       /* allocate extended descriptor */
17366 +       edesc = ablkcipher_edesc_alloc(req, false);
17367 +       if (IS_ERR(edesc))
17368 +               return PTR_ERR(edesc);
17369 +
17370 +       caam_req->flc = &ctx->flc[DECRYPT];
17371 +       caam_req->op_type = DECRYPT;
17372 +       caam_req->cbk = ablkcipher_done;
17373 +       caam_req->ctx = &req->base;
17374 +       caam_req->edesc = edesc;
17375 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17376 +       if (ret != -EINPROGRESS &&
17377 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17378 +               ablkcipher_unmap(ctx->dev, edesc, req);
17379 +               qi_cache_free(edesc);
17380 +       }
17381 +
17382 +       return ret;
17383 +}
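+
+/*
+ * Note that ablkcipher_encrypt() and ablkcipher_decrypt() above differ
+ * only in the flow context they select (ENCRYPT vs. DECRYPT shared
+ * descriptor) and in the boolean handed to ablkcipher_edesc_alloc(),
+ * which tells the allocator which direction the request runs in.
+ */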
17384 +
17385 +struct caam_crypto_alg {
17386 +       struct list_head entry;
17387 +       struct crypto_alg crypto_alg;
17388 +       struct caam_alg_entry caam;
17389 +};
17390 +
17391 +static int caam_cra_init(struct crypto_tfm *tfm)
17392 +{
17393 +       struct crypto_alg *alg = tfm->__crt_alg;
17394 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17395 +                                                       crypto_alg);
17396 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17397 +
17398 +       /* copy descriptor header template value */
17399 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17400 +                            caam_alg->caam.class1_alg_type;
17401 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17402 +                            caam_alg->caam.class2_alg_type;
17403 +
17404 +       ctx->dev = caam_alg->caam.dev;
17405 +
17406 +       return 0;
17407 +}
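+
+/*
+ * Worked example of the composition above for the "cbc(aes)" template
+ * below: class1_alg_type is OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, so
+ * ctx->cdata.algtype ends up as
+ *
+ *	OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC
+ *
+ * which is everything the shared descriptor needs for its OPERATION
+ * command except the encrypt/decrypt direction, OR-ed in later when
+ * the descriptors are constructed.
+ */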
17408 +
17409 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17410 +{
17411 +       struct ablkcipher_tfm *ablkcipher_tfm =
17412 +               crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17413 +
17414 +       ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17415 +       return caam_cra_init(tfm);
17416 +}
17417 +
17418 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17419 +{
17420 +       crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17421 +       return caam_cra_init(crypto_aead_tfm(tfm));
17422 +}
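+
+/*
+ * Both init paths reserve sizeof(struct caam_request) of per-request
+ * context; this is where the enqueue functions above stash the flow
+ * context, completion callback and extended descriptor of a request
+ * in flight.
+ */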
17423 +
17424 +static void caam_exit_common(struct caam_ctx *ctx)
17425 +{
17426 +       int i;
17427 +
17428 +       for (i = 0; i < NUM_OP; i++) {
17429 +               if (!ctx->flc[i].flc_dma)
17430 +                       continue;
17431 +               dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
17432 +                                sizeof(ctx->flc[i].flc) +
17433 +                                       desc_bytes(ctx->flc[i].sh_desc),
17434 +                                DMA_TO_DEVICE);
17435 +       }
17436 +
17437 +       if (ctx->key_dma)
17438 +               dma_unmap_single(ctx->dev, ctx->key_dma,
17439 +                                ctx->cdata.keylen + ctx->adata.keylen_pad,
17440 +                                DMA_TO_DEVICE);
17441 +}
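+
+/*
+ * The flc_dma/key_dma guards above appear to rely on the setkey paths
+ * being the only places that map these buffers: a context whose
+ * setkey was never called (or failed early) simply has nothing to
+ * unmap here.
+ */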
17442 +
17443 +static void caam_cra_exit(struct crypto_tfm *tfm)
17444 +{
17445 +       caam_exit_common(crypto_tfm_ctx(tfm));
17446 +}
17447 +
17448 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17449 +{
17450 +       caam_exit_common(crypto_aead_ctx(tfm));
17451 +}
17452 +
17453 +#define template_ablkcipher    template_u.ablkcipher
17454 +struct caam_alg_template {
17455 +       char name[CRYPTO_MAX_ALG_NAME];
17456 +       char driver_name[CRYPTO_MAX_ALG_NAME];
17457 +       unsigned int blocksize;
17458 +       u32 type;
17459 +       union {
17460 +               struct ablkcipher_alg ablkcipher;
17461 +       } template_u;
17462 +       u32 class1_alg_type;
17463 +       u32 class2_alg_type;
17464 +};
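+
+/*
+ * Sketch of how a template entry is expected to be expanded into a
+ * registrable algorithm (the actual constructor appears later in this
+ * file; the sketch below follows the caamalg.c counterpart and uses
+ * generic crypto API field names):
+ *
+ *	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ *	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ *		 template->driver_name);
+ *	alg->cra_blocksize = template->blocksize;
+ *	alg->cra_u.ablkcipher = template->template_ablkcipher;
+ *	(CRYPTO_ALG_TYPE_GIVCIPHER entries also get a givcipher cra_type)
+ */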
17465 +
17466 +static struct caam_alg_template driver_algs[] = {
17467 +       /* ablkcipher descriptor */
17468 +       {
17469 +               .name = "cbc(aes)",
17470 +               .driver_name = "cbc-aes-caam-qi2",
17471 +               .blocksize = AES_BLOCK_SIZE,
17472 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17473 +               .template_ablkcipher = {
17474 +                       .setkey = ablkcipher_setkey,
17475 +                       .encrypt = ablkcipher_encrypt,
17476 +                       .decrypt = ablkcipher_decrypt,
17477 +                       .givencrypt = ablkcipher_givencrypt,
17478 +                       .geniv = "<built-in>",
17479 +                       .min_keysize = AES_MIN_KEY_SIZE,
17480 +                       .max_keysize = AES_MAX_KEY_SIZE,
17481 +                       .ivsize = AES_BLOCK_SIZE,
17482 +               },
17483 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17484 +       },
17485 +       {
17486 +               .name = "cbc(des3_ede)",
17487 +               .driver_name = "cbc-3des-caam-qi2",
17488 +               .blocksize = DES3_EDE_BLOCK_SIZE,
17489 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17490 +               .template_ablkcipher = {
17491 +                       .setkey = ablkcipher_setkey,
17492 +                       .encrypt = ablkcipher_encrypt,
17493 +                       .decrypt = ablkcipher_decrypt,
17494 +                       .givencrypt = ablkcipher_givencrypt,
17495 +                       .geniv = "<built-in>",
17496 +                       .min_keysize = DES3_EDE_KEY_SIZE,
17497 +                       .max_keysize = DES3_EDE_KEY_SIZE,
17498 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17499 +               },
17500 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17501 +       },
17502 +       {
17503 +               .name = "cbc(des)",
17504 +               .driver_name = "cbc-des-caam-qi2",
17505 +               .blocksize = DES_BLOCK_SIZE,
17506 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17507 +               .template_ablkcipher = {
17508 +                       .setkey = ablkcipher_setkey,
17509 +                       .encrypt = ablkcipher_encrypt,
17510 +                       .decrypt = ablkcipher_decrypt,
17511 +                       .givencrypt = ablkcipher_givencrypt,
17512 +                       .geniv = "<built-in>",
17513 +                       .min_keysize = DES_KEY_SIZE,
17514 +                       .max_keysize = DES_KEY_SIZE,
17515 +                       .ivsize = DES_BLOCK_SIZE,
17516 +               },
17517 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17518 +       },
17519 +       {
17520 +               .name = "ctr(aes)",
17521 +               .driver_name = "ctr-aes-caam-qi2",
17522 +               .blocksize = 1,
17523 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17524 +               .template_ablkcipher = {
17525 +                       .setkey = ablkcipher_setkey,
17526 +                       .encrypt = ablkcipher_encrypt,
17527 +                       .decrypt = ablkcipher_decrypt,
17528 +                       .geniv = "chainiv",
17529 +                       .min_keysize = AES_MIN_KEY_SIZE,
17530 +                       .max_keysize = AES_MAX_KEY_SIZE,
17531 +                       .ivsize = AES_BLOCK_SIZE,
17532 +               },
17533 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17534 +       },
17535 +       {
17536 +               .name = "rfc3686(ctr(aes))",
17537 +               .driver_name = "rfc3686-ctr-aes-caam-qi2",
17538 +               .blocksize = 1,
17539 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17540 +               .template_ablkcipher = {
17541 +                       .setkey = ablkcipher_setkey,
17542 +                       .encrypt = ablkcipher_encrypt,
17543 +                       .decrypt = ablkcipher_decrypt,
17544 +                       .givencrypt = ablkcipher_givencrypt,
17545 +                       .geniv = "<built-in>",
17546 +                       .min_keysize = AES_MIN_KEY_SIZE +
17547 +                                      CTR_RFC3686_NONCE_SIZE,
17548 +                       .max_keysize = AES_MAX_KEY_SIZE +
17549 +                                      CTR_RFC3686_NONCE_SIZE,
17550 +                       .ivsize = CTR_RFC3686_IV_SIZE,
17551 +               },
17552 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17553 +       },
17554 +       {
17555 +               .name = "xts(aes)",
17556 +               .driver_name = "xts-aes-caam-qi2",
17557 +               .blocksize = AES_BLOCK_SIZE,
17558 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17559 +               .template_ablkcipher = {
17560 +                       .setkey = xts_ablkcipher_setkey,
17561 +                       .encrypt = ablkcipher_encrypt,
17562 +                       .decrypt = ablkcipher_decrypt,
17563 +                       .geniv = "eseqiv",
17564 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
17565 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
17566 +                       .ivsize = AES_BLOCK_SIZE,
17567 +               },
17568 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17569 +       }
17570 +};
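+
+/*
+ * Illustrative use of the templates above from an arbitrary kernel
+ * caller (not part of this driver): allocation goes through the
+ * generic API by cra_name, and the crypto core selects this backend
+ * by priority once it has registered:
+ *
+ *	struct crypto_ablkcipher *tfm;
+ *
+ *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *	if (!IS_ERR(tfm))
+ *		crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ */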
17571 +
17572 +static struct caam_aead_alg driver_aeads[] = {
17573 +       {
17574 +               .aead = {
17575 +                       .base = {
17576 +                               .cra_name = "rfc4106(gcm(aes))",
17577 +                               .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17578 +                               .cra_blocksize = 1,
17579 +                       },
17580 +                       .setkey = rfc4106_setkey,
17581 +                       .setauthsize = rfc4106_setauthsize,
17582 +                       .encrypt = ipsec_gcm_encrypt,
17583 +                       .decrypt = ipsec_gcm_decrypt,
17584 +                       .ivsize = 8,
17585 +                       .maxauthsize = AES_BLOCK_SIZE,
17586 +               },
17587 +               .caam = {
17588 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17589 +               },
17590 +       },
17591 +       {
17592 +               .aead = {
17593 +                       .base = {
17594 +                               .cra_name = "rfc4543(gcm(aes))",
17595 +                               .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17596 +                               .cra_blocksize = 1,
17597 +                       },
17598 +                       .setkey = rfc4543_setkey,
17599 +                       .setauthsize = rfc4543_setauthsize,
17600 +                       .encrypt = ipsec_gcm_encrypt,
17601 +                       .decrypt = ipsec_gcm_decrypt,
17602 +                       .ivsize = 8,
17603 +                       .maxauthsize = AES_BLOCK_SIZE,
17604 +               },
17605 +               .caam = {
17606 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17607 +               },
17608 +       },
17609 +       /* Galois Counter Mode */
17610 +       {
17611 +               .aead = {
17612 +                       .base = {
17613 +                               .cra_name = "gcm(aes)",
17614 +                               .cra_driver_name = "gcm-aes-caam-qi2",
17615 +                               .cra_blocksize = 1,
17616 +                       },
17617 +                       .setkey = gcm_setkey,
17618 +                       .setauthsize = gcm_setauthsize,
17619 +                       .encrypt = aead_encrypt,
17620 +                       .decrypt = aead_decrypt,
17621 +                       .ivsize = 12,
17622 +                       .maxauthsize = AES_BLOCK_SIZE,
17623 +               },
17624 +               .caam = {
17625 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17626 +               }
17627 +       },
17628 +       /* single-pass ipsec_esp descriptor */
17629 +       {
17630 +               .aead = {
17631 +                       .base = {
17632 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
17633 +                               .cra_driver_name = "authenc-hmac-md5-"
17634 +                                                  "cbc-aes-caam-qi2",
17635 +                               .cra_blocksize = AES_BLOCK_SIZE,
17636 +                       },
17637 +                       .setkey = aead_setkey,
17638 +                       .setauthsize = aead_setauthsize,
17639 +                       .encrypt = aead_encrypt,
17640 +                       .decrypt = aead_decrypt,
17641 +                       .ivsize = AES_BLOCK_SIZE,
17642 +                       .maxauthsize = MD5_DIGEST_SIZE,
17643 +               },
17644 +               .caam = {
17645 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17646 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17647 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17648 +               }
17649 +       },
17650 +       {
17651 +               .aead = {
17652 +                       .base = {
17653 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17654 +                                           "cbc(aes)))",
17655 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17656 +                                                  "cbc-aes-caam-qi2",
17657 +                               .cra_blocksize = AES_BLOCK_SIZE,
17658 +                       },
17659 +                       .setkey = aead_setkey,
17660 +                       .setauthsize = aead_setauthsize,
17661 +                       .encrypt = aead_encrypt,
17662 +                       .decrypt = aead_decrypt,
17663 +                       .ivsize = AES_BLOCK_SIZE,
17664 +                       .maxauthsize = MD5_DIGEST_SIZE,
17665 +               },
17666 +               .caam = {
17667 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17668 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17669 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17670 +                       .geniv = true,
17671 +               }
17672 +       },
17673 +       {
17674 +               .aead = {
17675 +                       .base = {
17676 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
17677 +                               .cra_driver_name = "authenc-hmac-sha1-"
17678 +                                                  "cbc-aes-caam-qi2",
17679 +                               .cra_blocksize = AES_BLOCK_SIZE,
17680 +                       },
17681 +                       .setkey = aead_setkey,
17682 +                       .setauthsize = aead_setauthsize,
17683 +                       .encrypt = aead_encrypt,
17684 +                       .decrypt = aead_decrypt,
17685 +                       .ivsize = AES_BLOCK_SIZE,
17686 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17687 +               },
17688 +               .caam = {
17689 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17690 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17691 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17692 +               }
17693 +       },
17694 +       {
17695 +               .aead = {
17696 +                       .base = {
17697 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
17698 +                                           "cbc(aes)))",
17699 +                               .cra_driver_name = "echainiv-authenc-"
17700 +                                                  "hmac-sha1-cbc-aes-caam-qi2",
17701 +                               .cra_blocksize = AES_BLOCK_SIZE,
17702 +                       },
17703 +                       .setkey = aead_setkey,
17704 +                       .setauthsize = aead_setauthsize,
17705 +                       .encrypt = aead_encrypt,
17706 +                       .decrypt = aead_decrypt,
17707 +                       .ivsize = AES_BLOCK_SIZE,
17708 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17709 +               },
17710 +               .caam = {
17711 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17712 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17713 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17714 +                       .geniv = true,
17715 +               },
17716 +       },
17717 +       {
17718 +               .aead = {
17719 +                       .base = {
17720 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
17721 +                               .cra_driver_name = "authenc-hmac-sha224-"
17722 +                                                  "cbc-aes-caam-qi2",
17723 +                               .cra_blocksize = AES_BLOCK_SIZE,
17724 +                       },
17725 +                       .setkey = aead_setkey,
17726 +                       .setauthsize = aead_setauthsize,
17727 +                       .encrypt = aead_encrypt,
17728 +                       .decrypt = aead_decrypt,
17729 +                       .ivsize = AES_BLOCK_SIZE,
17730 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17731 +               },
17732 +               .caam = {
17733 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17734 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17735 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17736 +               }
17737 +       },
17738 +       {
17739 +               .aead = {
17740 +                       .base = {
17741 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
17742 +                                           "cbc(aes)))",
17743 +                               .cra_driver_name = "echainiv-authenc-"
17744 +                                                  "hmac-sha224-cbc-aes-caam-qi2",
17745 +                               .cra_blocksize = AES_BLOCK_SIZE,
17746 +                       },
17747 +                       .setkey = aead_setkey,
17748 +                       .setauthsize = aead_setauthsize,
17749 +                       .encrypt = aead_encrypt,
17750 +                       .decrypt = aead_decrypt,
17751 +                       .ivsize = AES_BLOCK_SIZE,
17752 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17753 +               },
17754 +               .caam = {
17755 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17756 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17757 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17758 +                       .geniv = true,
17759 +               }
17760 +       },
17761 +       {
17762 +               .aead = {
17763 +                       .base = {
17764 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
17765 +                               .cra_driver_name = "authenc-hmac-sha256-"
17766 +                                                  "cbc-aes-caam-qi2",
17767 +                               .cra_blocksize = AES_BLOCK_SIZE,
17768 +                       },
17769 +                       .setkey = aead_setkey,
17770 +                       .setauthsize = aead_setauthsize,
17771 +                       .encrypt = aead_encrypt,
17772 +                       .decrypt = aead_decrypt,
17773 +                       .ivsize = AES_BLOCK_SIZE,
17774 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17775 +               },
17776 +               .caam = {
17777 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17778 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17779 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17780 +               }
17781 +       },
17782 +       {
17783 +               .aead = {
17784 +                       .base = {
17785 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
17786 +                                           "cbc(aes)))",
17787 +                               .cra_driver_name = "echainiv-authenc-"
17788 +                                                  "hmac-sha256-cbc-aes-"
17789 +                                                  "caam-qi2",
17790 +                               .cra_blocksize = AES_BLOCK_SIZE,
17791 +                       },
17792 +                       .setkey = aead_setkey,
17793 +                       .setauthsize = aead_setauthsize,
17794 +                       .encrypt = aead_encrypt,
17795 +                       .decrypt = aead_decrypt,
17796 +                       .ivsize = AES_BLOCK_SIZE,
17797 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17798 +               },
17799 +               .caam = {
17800 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17801 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17802 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17803 +                       .geniv = true,
17804 +               }
17805 +       },
17806 +       {
17807 +               .aead = {
17808 +                       .base = {
17809 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
17810 +                               .cra_driver_name = "authenc-hmac-sha384-"
17811 +                                                  "cbc-aes-caam-qi2",
17812 +                               .cra_blocksize = AES_BLOCK_SIZE,
17813 +                       },
17814 +                       .setkey = aead_setkey,
17815 +                       .setauthsize = aead_setauthsize,
17816 +                       .encrypt = aead_encrypt,
17817 +                       .decrypt = aead_decrypt,
17818 +                       .ivsize = AES_BLOCK_SIZE,
17819 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17820 +               },
17821 +               .caam = {
17822 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17823 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17824 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17825 +               }
17826 +       },
17827 +       {
17828 +               .aead = {
17829 +                       .base = {
17830 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
17831 +                                           "cbc(aes)))",
17832 +                               .cra_driver_name = "echainiv-authenc-"
17833 +                                                  "hmac-sha384-cbc-aes-"
17834 +                                                  "caam-qi2",
17835 +                               .cra_blocksize = AES_BLOCK_SIZE,
17836 +                       },
17837 +                       .setkey = aead_setkey,
17838 +                       .setauthsize = aead_setauthsize,
17839 +                       .encrypt = aead_encrypt,
17840 +                       .decrypt = aead_decrypt,
17841 +                       .ivsize = AES_BLOCK_SIZE,
17842 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17843 +               },
17844 +               .caam = {
17845 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17846 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17847 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17848 +                       .geniv = true,
17849 +               }
17850 +       },
17851 +       {
17852 +               .aead = {
17853 +                       .base = {
17854 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
17855 +                               .cra_driver_name = "authenc-hmac-sha512-"
17856 +                                                  "cbc-aes-caam-qi2",
17857 +                               .cra_blocksize = AES_BLOCK_SIZE,
17858 +                       },
17859 +                       .setkey = aead_setkey,
17860 +                       .setauthsize = aead_setauthsize,
17861 +                       .encrypt = aead_encrypt,
17862 +                       .decrypt = aead_decrypt,
17863 +                       .ivsize = AES_BLOCK_SIZE,
17864 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17865 +               },
17866 +               .caam = {
17867 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17868 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17869 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17870 +               }
17871 +       },
17872 +       {
17873 +               .aead = {
17874 +                       .base = {
17875 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
17876 +                                           "cbc(aes)))",
17877 +                               .cra_driver_name = "echainiv-authenc-"
17878 +                                                  "hmac-sha512-cbc-aes-"
17879 +                                                  "caam-qi2",
17880 +                               .cra_blocksize = AES_BLOCK_SIZE,
17881 +                       },
17882 +                       .setkey = aead_setkey,
17883 +                       .setauthsize = aead_setauthsize,
17884 +                       .encrypt = aead_encrypt,
17885 +                       .decrypt = aead_decrypt,
17886 +                       .ivsize = AES_BLOCK_SIZE,
17887 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17888 +               },
17889 +               .caam = {
17890 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17891 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17892 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17893 +                       .geniv = true,
17894 +               }
17895 +       },
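+       /* single-pass authenc over cbc(des3_ede) */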
17896 +       {
17897 +               .aead = {
17898 +                       .base = {
17899 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17900 +                               .cra_driver_name = "authenc-hmac-md5-"
17901 +                                                  "cbc-des3_ede-caam-qi2",
17902 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17903 +                       },
17904 +                       .setkey = aead_setkey,
17905 +                       .setauthsize = aead_setauthsize,
17906 +                       .encrypt = aead_encrypt,
17907 +                       .decrypt = aead_decrypt,
17908 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17909 +                       .maxauthsize = MD5_DIGEST_SIZE,
17910 +               },
17911 +               .caam = {
17912 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17913 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17914 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17915 +               }
17916 +       },
17917 +       {
17918 +               .aead = {
17919 +                       .base = {
17920 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17921 +                                           "cbc(des3_ede)))",
17922 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17923 +                                                  "cbc-des3_ede-caam-qi2",
17924 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17925 +                       },
17926 +                       .setkey = aead_setkey,
17927 +                       .setauthsize = aead_setauthsize,
17928 +                       .encrypt = aead_encrypt,
17929 +                       .decrypt = aead_decrypt,
17930 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17931 +                       .maxauthsize = MD5_DIGEST_SIZE,
17932 +               },
17933 +               .caam = {
17934 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17935 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17936 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17937 +                       .geniv = true,
17938 +               }
17939 +       },
17940 +       {
17941 +               .aead = {
17942 +                       .base = {
17943 +                               .cra_name = "authenc(hmac(sha1),"
17944 +                                           "cbc(des3_ede))",
17945 +                               .cra_driver_name = "authenc-hmac-sha1-"
17946 +                                                  "cbc-des3_ede-caam-qi2",
17947 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17948 +                       },
17949 +                       .setkey = aead_setkey,
17950 +                       .setauthsize = aead_setauthsize,
17951 +                       .encrypt = aead_encrypt,
17952 +                       .decrypt = aead_decrypt,
17953 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17954 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17955 +               },
17956 +               .caam = {
17957 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17958 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17959 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17960 +               },
17961 +       },
17962 +       {
17963 +               .aead = {
17964 +                       .base = {
17965 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
17966 +                                           "cbc(des3_ede)))",
17967 +                               .cra_driver_name = "echainiv-authenc-"
17968 +                                                  "hmac-sha1-"
17969 +                                                  "cbc-des3_ede-caam-qi2",
17970 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17971 +                       },
17972 +                       .setkey = aead_setkey,
17973 +                       .setauthsize = aead_setauthsize,
17974 +                       .encrypt = aead_encrypt,
17975 +                       .decrypt = aead_decrypt,
17976 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17977 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17978 +               },
17979 +               .caam = {
17980 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17981 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17982 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17983 +                       .geniv = true,
17984 +               }
17985 +       },
17986 +       {
17987 +               .aead = {
17988 +                       .base = {
17989 +                               .cra_name = "authenc(hmac(sha224),"
17990 +                                           "cbc(des3_ede))",
17991 +                               .cra_driver_name = "authenc-hmac-sha224-"
17992 +                                                  "cbc-des3_ede-caam-qi2",
17993 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17994 +                       },
17995 +                       .setkey = aead_setkey,
17996 +                       .setauthsize = aead_setauthsize,
17997 +                       .encrypt = aead_encrypt,
17998 +                       .decrypt = aead_decrypt,
17999 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18000 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18001 +               },
18002 +               .caam = {
18003 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18004 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18005 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18006 +               },
18007 +       },
18008 +       {
18009 +               .aead = {
18010 +                       .base = {
18011 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
18012 +                                           "cbc(des3_ede)))",
18013 +                               .cra_driver_name = "echainiv-authenc-"
18014 +                                                  "hmac-sha224-"
18015 +                                                  "cbc-des3_ede-caam-qi2",
18016 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18017 +                       },
18018 +                       .setkey = aead_setkey,
18019 +                       .setauthsize = aead_setauthsize,
18020 +                       .encrypt = aead_encrypt,
18021 +                       .decrypt = aead_decrypt,
18022 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18023 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18024 +               },
18025 +               .caam = {
18026 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18027 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18028 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18029 +                       .geniv = true,
18030 +               }
18031 +       },
18032 +       {
18033 +               .aead = {
18034 +                       .base = {
18035 +                               .cra_name = "authenc(hmac(sha256),"
18036 +                                           "cbc(des3_ede))",
18037 +                               .cra_driver_name = "authenc-hmac-sha256-"
18038 +                                                  "cbc-des3_ede-caam-qi2",
18039 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18040 +                       },
18041 +                       .setkey = aead_setkey,
18042 +                       .setauthsize = aead_setauthsize,
18043 +                       .encrypt = aead_encrypt,
18044 +                       .decrypt = aead_decrypt,
18045 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18046 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18047 +               },
18048 +               .caam = {
18049 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18050 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18051 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18052 +               },
18053 +       },
18054 +       {
18055 +               .aead = {
18056 +                       .base = {
18057 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
18058 +                                           "cbc(des3_ede)))",
18059 +                               .cra_driver_name = "echainiv-authenc-"
18060 +                                                  "hmac-sha256-"
18061 +                                                  "cbc-des3_ede-caam-qi2",
18062 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18063 +                       },
18064 +                       .setkey = aead_setkey,
18065 +                       .setauthsize = aead_setauthsize,
18066 +                       .encrypt = aead_encrypt,
18067 +                       .decrypt = aead_decrypt,
18068 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18069 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18070 +               },
18071 +               .caam = {
18072 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18073 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18074 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18075 +                       .geniv = true,
18076 +               }
18077 +       },
18078 +       {
18079 +               .aead = {
18080 +                       .base = {
18081 +                               .cra_name = "authenc(hmac(sha384),"
18082 +                                           "cbc(des3_ede))",
18083 +                               .cra_driver_name = "authenc-hmac-sha384-"
18084 +                                                  "cbc-des3_ede-caam-qi2",
18085 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18086 +                       },
18087 +                       .setkey = aead_setkey,
18088 +                       .setauthsize = aead_setauthsize,
18089 +                       .encrypt = aead_encrypt,
18090 +                       .decrypt = aead_decrypt,
18091 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18092 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18093 +               },
18094 +               .caam = {
18095 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18096 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18097 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18098 +               },
18099 +       },
18100 +       {
18101 +               .aead = {
18102 +                       .base = {
18103 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18104 +                                           "cbc(des3_ede)))",
18105 +                               .cra_driver_name = "echainiv-authenc-"
18106 +                                                  "hmac-sha384-"
18107 +                                                  "cbc-des3_ede-caam-qi2",
18108 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18109 +                       },
18110 +                       .setkey = aead_setkey,
18111 +                       .setauthsize = aead_setauthsize,
18112 +                       .encrypt = aead_encrypt,
18113 +                       .decrypt = aead_decrypt,
18114 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18115 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18116 +               },
18117 +               .caam = {
18118 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18119 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18120 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18121 +                       .geniv = true,
18122 +               }
18123 +       },
18124 +       {
18125 +               .aead = {
18126 +                       .base = {
18127 +                               .cra_name = "authenc(hmac(sha512),"
18128 +                                           "cbc(des3_ede))",
18129 +                               .cra_driver_name = "authenc-hmac-sha512-"
18130 +                                                  "cbc-des3_ede-caam-qi2",
18131 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18132 +                       },
18133 +                       .setkey = aead_setkey,
18134 +                       .setauthsize = aead_setauthsize,
18135 +                       .encrypt = aead_encrypt,
18136 +                       .decrypt = aead_decrypt,
18137 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18138 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18139 +               },
18140 +               .caam = {
18141 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18142 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18143 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18144 +               },
18145 +       },
18146 +       {
18147 +               .aead = {
18148 +                       .base = {
18149 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18150 +                                           "cbc(des3_ede)))",
18151 +                               .cra_driver_name = "echainiv-authenc-"
18152 +                                                  "hmac-sha512-"
18153 +                                                  "cbc-des3_ede-caam-qi2",
18154 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18155 +                       },
18156 +                       .setkey = aead_setkey,
18157 +                       .setauthsize = aead_setauthsize,
18158 +                       .encrypt = aead_encrypt,
18159 +                       .decrypt = aead_decrypt,
18160 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18161 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18162 +               },
18163 +               .caam = {
18164 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18165 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18166 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18167 +                       .geniv = true,
18168 +               }
18169 +       },
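+       /* single-pass authenc over cbc(des) */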
18170 +       {
18171 +               .aead = {
18172 +                       .base = {
18173 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
18174 +                               .cra_driver_name = "authenc-hmac-md5-"
18175 +                                                  "cbc-des-caam-qi2",
18176 +                               .cra_blocksize = DES_BLOCK_SIZE,
18177 +                       },
18178 +                       .setkey = aead_setkey,
18179 +                       .setauthsize = aead_setauthsize,
18180 +                       .encrypt = aead_encrypt,
18181 +                       .decrypt = aead_decrypt,
18182 +                       .ivsize = DES_BLOCK_SIZE,
18183 +                       .maxauthsize = MD5_DIGEST_SIZE,
18184 +               },
18185 +               .caam = {
18186 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18187 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18188 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18189 +               },
18190 +       },
18191 +       {
18192 +               .aead = {
18193 +                       .base = {
18194 +                               .cra_name = "echainiv(authenc(hmac(md5),"
18195 +                                           "cbc(des)))",
18196 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
18197 +                                                  "cbc-des-caam-qi2",
18198 +                               .cra_blocksize = DES_BLOCK_SIZE,
18199 +                       },
18200 +                       .setkey = aead_setkey,
18201 +                       .setauthsize = aead_setauthsize,
18202 +                       .encrypt = aead_encrypt,
18203 +                       .decrypt = aead_decrypt,
18204 +                       .ivsize = DES_BLOCK_SIZE,
18205 +                       .maxauthsize = MD5_DIGEST_SIZE,
18206 +               },
18207 +               .caam = {
18208 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18209 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18210 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18211 +                       .geniv = true,
18212 +               }
18213 +       },
18214 +       {
18215 +               .aead = {
18216 +                       .base = {
18217 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
18218 +                               .cra_driver_name = "authenc-hmac-sha1-"
18219 +                                                  "cbc-des-caam-qi2",
18220 +                               .cra_blocksize = DES_BLOCK_SIZE,
18221 +                       },
18222 +                       .setkey = aead_setkey,
18223 +                       .setauthsize = aead_setauthsize,
18224 +                       .encrypt = aead_encrypt,
18225 +                       .decrypt = aead_decrypt,
18226 +                       .ivsize = DES_BLOCK_SIZE,
18227 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18228 +               },
18229 +               .caam = {
18230 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18231 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18232 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18233 +               },
18234 +       },
18235 +       {
18236 +               .aead = {
18237 +                       .base = {
18238 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
18239 +                                           "cbc(des)))",
18240 +                               .cra_driver_name = "echainiv-authenc-"
18241 +                                                  "hmac-sha1-cbc-des-caam-qi2",
18242 +                               .cra_blocksize = DES_BLOCK_SIZE,
18243 +                       },
18244 +                       .setkey = aead_setkey,
18245 +                       .setauthsize = aead_setauthsize,
18246 +                       .encrypt = aead_encrypt,
18247 +                       .decrypt = aead_decrypt,
18248 +                       .ivsize = DES_BLOCK_SIZE,
18249 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18250 +               },
18251 +               .caam = {
18252 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18253 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18254 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18255 +                       .geniv = true,
18256 +               }
18257 +       },
18258 +       {
18259 +               .aead = {
18260 +                       .base = {
18261 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
18262 +                               .cra_driver_name = "authenc-hmac-sha224-"
18263 +                                                  "cbc-des-caam-qi2",
18264 +                               .cra_blocksize = DES_BLOCK_SIZE,
18265 +                       },
18266 +                       .setkey = aead_setkey,
18267 +                       .setauthsize = aead_setauthsize,
18268 +                       .encrypt = aead_encrypt,
18269 +                       .decrypt = aead_decrypt,
18270 +                       .ivsize = DES_BLOCK_SIZE,
18271 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18272 +               },
18273 +               .caam = {
18274 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18275 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18276 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18277 +               },
18278 +       },
18279 +       {
18280 +               .aead = {
18281 +                       .base = {
18282 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
18283 +                                           "cbc(des)))",
18284 +                               .cra_driver_name = "echainiv-authenc-"
18285 +                                                  "hmac-sha224-cbc-des-"
18286 +                                                  "caam-qi2",
18287 +                               .cra_blocksize = DES_BLOCK_SIZE,
18288 +                       },
18289 +                       .setkey = aead_setkey,
18290 +                       .setauthsize = aead_setauthsize,
18291 +                       .encrypt = aead_encrypt,
18292 +                       .decrypt = aead_decrypt,
18293 +                       .ivsize = DES_BLOCK_SIZE,
18294 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18295 +               },
18296 +               .caam = {
18297 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18298 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18299 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18300 +                       .geniv = true,
18301 +               }
18302 +       },
18303 +       {
18304 +               .aead = {
18305 +                       .base = {
18306 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
18307 +                               .cra_driver_name = "authenc-hmac-sha256-"
18308 +                                                  "cbc-des-caam-qi2",
18309 +                               .cra_blocksize = DES_BLOCK_SIZE,
18310 +                       },
18311 +                       .setkey = aead_setkey,
18312 +                       .setauthsize = aead_setauthsize,
18313 +                       .encrypt = aead_encrypt,
18314 +                       .decrypt = aead_decrypt,
18315 +                       .ivsize = DES_BLOCK_SIZE,
18316 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18317 +               },
18318 +               .caam = {
18319 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18320 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18321 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18322 +               },
18323 +       },
18324 +       {
18325 +               .aead = {
18326 +                       .base = {
18327 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
18328 +                                           "cbc(des)))",
18329 +                               .cra_driver_name = "echainiv-authenc-"
18330 +                                                  "hmac-sha256-cbc-des-"
18331 +                                                  "caam-qi2",
18332 +                               .cra_blocksize = DES_BLOCK_SIZE,
18333 +                       },
18334 +                       .setkey = aead_setkey,
18335 +                       .setauthsize = aead_setauthsize,
18336 +                       .encrypt = aead_encrypt,
18337 +                       .decrypt = aead_decrypt,
18338 +                       .ivsize = DES_BLOCK_SIZE,
18339 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18340 +               },
18341 +               .caam = {
18342 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18343 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18344 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18345 +                       .geniv = true,
18346 +               },
18347 +       },
18348 +       {
18349 +               .aead = {
18350 +                       .base = {
18351 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
18352 +                               .cra_driver_name = "authenc-hmac-sha384-"
18353 +                                                  "cbc-des-caam-qi2",
18354 +                               .cra_blocksize = DES_BLOCK_SIZE,
18355 +                       },
18356 +                       .setkey = aead_setkey,
18357 +                       .setauthsize = aead_setauthsize,
18358 +                       .encrypt = aead_encrypt,
18359 +                       .decrypt = aead_decrypt,
18360 +                       .ivsize = DES_BLOCK_SIZE,
18361 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18362 +               },
18363 +               .caam = {
18364 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18365 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18366 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18367 +               },
18368 +       },
18369 +       {
18370 +               .aead = {
18371 +                       .base = {
18372 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18373 +                                           "cbc(des)))",
18374 +                               .cra_driver_name = "echainiv-authenc-"
18375 +                                                  "hmac-sha384-cbc-des-"
18376 +                                                  "caam-qi2",
18377 +                               .cra_blocksize = DES_BLOCK_SIZE,
18378 +                       },
18379 +                       .setkey = aead_setkey,
18380 +                       .setauthsize = aead_setauthsize,
18381 +                       .encrypt = aead_encrypt,
18382 +                       .decrypt = aead_decrypt,
18383 +                       .ivsize = DES_BLOCK_SIZE,
18384 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18385 +               },
18386 +               .caam = {
18387 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18388 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18389 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18390 +                       .geniv = true,
18391 +               }
18392 +       },
18393 +       {
18394 +               .aead = {
18395 +                       .base = {
18396 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
18397 +                               .cra_driver_name = "authenc-hmac-sha512-"
18398 +                                                  "cbc-des-caam-qi2",
18399 +                               .cra_blocksize = DES_BLOCK_SIZE,
18400 +                       },
18401 +                       .setkey = aead_setkey,
18402 +                       .setauthsize = aead_setauthsize,
18403 +                       .encrypt = aead_encrypt,
18404 +                       .decrypt = aead_decrypt,
18405 +                       .ivsize = DES_BLOCK_SIZE,
18406 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18407 +               },
18408 +               .caam = {
18409 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18410 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18411 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18412 +               }
18413 +       },
18414 +       {
18415 +               .aead = {
18416 +                       .base = {
18417 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18418 +                                           "cbc(des)))",
18419 +                               .cra_driver_name = "echainiv-authenc-"
18420 +                                                  "hmac-sha512-cbc-des-"
18421 +                                                  "caam-qi2",
18422 +                               .cra_blocksize = DES_BLOCK_SIZE,
18423 +                       },
18424 +                       .setkey = aead_setkey,
18425 +                       .setauthsize = aead_setauthsize,
18426 +                       .encrypt = aead_encrypt,
18427 +                       .decrypt = aead_decrypt,
18428 +                       .ivsize = DES_BLOCK_SIZE,
18429 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18430 +               },
18431 +               .caam = {
18432 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18433 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18434 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18435 +                       .geniv = true,
18436 +               }
18437 +       },
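+       /* single-pass authenc over rfc3686(ctr(aes)) */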
18438 +       {
18439 +               .aead = {
18440 +                       .base = {
18441 +                               .cra_name = "authenc(hmac(md5),"
18442 +                                           "rfc3686(ctr(aes)))",
18443 +                               .cra_driver_name = "authenc-hmac-md5-"
18444 +                                                  "rfc3686-ctr-aes-caam-qi2",
18445 +                               .cra_blocksize = 1,
18446 +                       },
18447 +                       .setkey = aead_setkey,
18448 +                       .setauthsize = aead_setauthsize,
18449 +                       .encrypt = aead_encrypt,
18450 +                       .decrypt = aead_decrypt,
18451 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18452 +                       .maxauthsize = MD5_DIGEST_SIZE,
18453 +               },
18454 +               .caam = {
18455 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18456 +                                          OP_ALG_AAI_CTR_MOD128,
18457 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18458 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18459 +                       .rfc3686 = true,
18460 +               },
18461 +       },
18462 +       {
18463 +               .aead = {
18464 +                       .base = {
18465 +                               .cra_name = "seqiv(authenc("
18466 +                                           "hmac(md5),rfc3686(ctr(aes))))",
18467 +                               .cra_driver_name = "seqiv-authenc-hmac-md5-"
18468 +                                                  "rfc3686-ctr-aes-caam-qi2",
18469 +                               .cra_blocksize = 1,
18470 +                       },
18471 +                       .setkey = aead_setkey,
18472 +                       .setauthsize = aead_setauthsize,
18473 +                       .encrypt = aead_encrypt,
18474 +                       .decrypt = aead_decrypt,
18475 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18476 +                       .maxauthsize = MD5_DIGEST_SIZE,
18477 +               },
18478 +               .caam = {
18479 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18480 +                                          OP_ALG_AAI_CTR_MOD128,
18481 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18482 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18483 +                       .rfc3686 = true,
18484 +                       .geniv = true,
18485 +               },
18486 +       },
18487 +       {
18488 +               .aead = {
18489 +                       .base = {
18490 +                               .cra_name = "authenc(hmac(sha1),"
18491 +                                           "rfc3686(ctr(aes)))",
18492 +                               .cra_driver_name = "authenc-hmac-sha1-"
18493 +                                                  "rfc3686-ctr-aes-caam-qi2",
18494 +                               .cra_blocksize = 1,
18495 +                       },
18496 +                       .setkey = aead_setkey,
18497 +                       .setauthsize = aead_setauthsize,
18498 +                       .encrypt = aead_encrypt,
18499 +                       .decrypt = aead_decrypt,
18500 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18501 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18502 +               },
18503 +               .caam = {
18504 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18505 +                                          OP_ALG_AAI_CTR_MOD128,
18506 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18507 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18508 +                       .rfc3686 = true,
18509 +               },
18510 +       },
18511 +       {
18512 +               .aead = {
18513 +                       .base = {
18514 +                               .cra_name = "seqiv(authenc("
18515 +                                           "hmac(sha1),rfc3686(ctr(aes))))",
18516 +                               .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18517 +                                                  "rfc3686-ctr-aes-caam-qi2",
18518 +                               .cra_blocksize = 1,
18519 +                       },
18520 +                       .setkey = aead_setkey,
18521 +                       .setauthsize = aead_setauthsize,
18522 +                       .encrypt = aead_encrypt,
18523 +                       .decrypt = aead_decrypt,
18524 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18525 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18526 +               },
18527 +               .caam = {
18528 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18529 +                                          OP_ALG_AAI_CTR_MOD128,
18530 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18531 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18532 +                       .rfc3686 = true,
18533 +                       .geniv = true,
18534 +               },
18535 +       },
18536 +       {
18537 +               .aead = {
18538 +                       .base = {
18539 +                               .cra_name = "authenc(hmac(sha224),"
18540 +                                           "rfc3686(ctr(aes)))",
18541 +                               .cra_driver_name = "authenc-hmac-sha224-"
18542 +                                                  "rfc3686-ctr-aes-caam-qi2",
18543 +                               .cra_blocksize = 1,
18544 +                       },
18545 +                       .setkey = aead_setkey,
18546 +                       .setauthsize = aead_setauthsize,
18547 +                       .encrypt = aead_encrypt,
18548 +                       .decrypt = aead_decrypt,
18549 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18550 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18551 +               },
18552 +               .caam = {
18553 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18554 +                                          OP_ALG_AAI_CTR_MOD128,
18555 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18556 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18557 +                       .rfc3686 = true,
18558 +               },
18559 +       },
18560 +       {
18561 +               .aead = {
18562 +                       .base = {
18563 +                               .cra_name = "seqiv(authenc("
18564 +                                           "hmac(sha224),rfc3686(ctr(aes))))",
18565 +                               .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18566 +                                                  "rfc3686-ctr-aes-caam-qi2",
18567 +                               .cra_blocksize = 1,
18568 +                       },
18569 +                       .setkey = aead_setkey,
18570 +                       .setauthsize = aead_setauthsize,
18571 +                       .encrypt = aead_encrypt,
18572 +                       .decrypt = aead_decrypt,
18573 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18574 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18575 +               },
18576 +               .caam = {
18577 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18578 +                                          OP_ALG_AAI_CTR_MOD128,
18579 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18580 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18581 +                       .rfc3686 = true,
18582 +                       .geniv = true,
18583 +               },
18584 +       },
18585 +       {
18586 +               .aead = {
18587 +                       .base = {
18588 +                               .cra_name = "authenc(hmac(sha256),"
18589 +                                           "rfc3686(ctr(aes)))",
18590 +                               .cra_driver_name = "authenc-hmac-sha256-"
18591 +                                                  "rfc3686-ctr-aes-caam-qi2",
18592 +                               .cra_blocksize = 1,
18593 +                       },
18594 +                       .setkey = aead_setkey,
18595 +                       .setauthsize = aead_setauthsize,
18596 +                       .encrypt = aead_encrypt,
18597 +                       .decrypt = aead_decrypt,
18598 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18599 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18600 +               },
18601 +               .caam = {
18602 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18603 +                                          OP_ALG_AAI_CTR_MOD128,
18604 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18605 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18606 +                       .rfc3686 = true,
18607 +               },
18608 +       },
18609 +       {
18610 +               .aead = {
18611 +                       .base = {
18612 +                               .cra_name = "seqiv(authenc(hmac(sha256),"
18613 +                                           "rfc3686(ctr(aes))))",
18614 +                               .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18615 +                                                  "rfc3686-ctr-aes-caam-qi2",
18616 +                               .cra_blocksize = 1,
18617 +                       },
18618 +                       .setkey = aead_setkey,
18619 +                       .setauthsize = aead_setauthsize,
18620 +                       .encrypt = aead_encrypt,
18621 +                       .decrypt = aead_decrypt,
18622 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18623 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18624 +               },
18625 +               .caam = {
18626 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18627 +                                          OP_ALG_AAI_CTR_MOD128,
18628 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18629 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18630 +                       .rfc3686 = true,
18631 +                       .geniv = true,
18632 +               },
18633 +       },
18634 +       {
18635 +               .aead = {
18636 +                       .base = {
18637 +                               .cra_name = "authenc(hmac(sha384),"
18638 +                                           "rfc3686(ctr(aes)))",
18639 +                               .cra_driver_name = "authenc-hmac-sha384-"
18640 +                                                  "rfc3686-ctr-aes-caam-qi2",
18641 +                               .cra_blocksize = 1,
18642 +                       },
18643 +                       .setkey = aead_setkey,
18644 +                       .setauthsize = aead_setauthsize,
18645 +                       .encrypt = aead_encrypt,
18646 +                       .decrypt = aead_decrypt,
18647 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18648 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18649 +               },
18650 +               .caam = {
18651 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18652 +                                          OP_ALG_AAI_CTR_MOD128,
18653 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18654 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18655 +                       .rfc3686 = true,
18656 +               },
18657 +       },
18658 +       {
18659 +               .aead = {
18660 +                       .base = {
18661 +                               .cra_name = "seqiv(authenc(hmac(sha384),"
18662 +                                           "rfc3686(ctr(aes))))",
18663 +                               .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18664 +                                                  "rfc3686-ctr-aes-caam-qi2",
18665 +                               .cra_blocksize = 1,
18666 +                       },
18667 +                       .setkey = aead_setkey,
18668 +                       .setauthsize = aead_setauthsize,
18669 +                       .encrypt = aead_encrypt,
18670 +                       .decrypt = aead_decrypt,
18671 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18672 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18673 +               },
18674 +               .caam = {
18675 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18676 +                                          OP_ALG_AAI_CTR_MOD128,
18677 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18678 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18679 +                       .rfc3686 = true,
18680 +                       .geniv = true,
18681 +               },
18682 +       },
18683 +       {
18684 +               .aead = {
18685 +                       .base = {
18686 +                               .cra_name = "authenc(hmac(sha512),"
18687 +                                           "rfc3686(ctr(aes)))",
18688 +                               .cra_driver_name = "authenc-hmac-sha512-"
18689 +                                                  "rfc3686-ctr-aes-caam-qi2",
18690 +                               .cra_blocksize = 1,
18691 +                       },
18692 +                       .setkey = aead_setkey,
18693 +                       .setauthsize = aead_setauthsize,
18694 +                       .encrypt = aead_encrypt,
18695 +                       .decrypt = aead_decrypt,
18696 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18697 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18698 +               },
18699 +               .caam = {
18700 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18701 +                                          OP_ALG_AAI_CTR_MOD128,
18702 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18703 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18704 +                       .rfc3686 = true,
18705 +               },
18706 +       },
18707 +       {
18708 +               .aead = {
18709 +                       .base = {
18710 +                               .cra_name = "seqiv(authenc(hmac(sha512),"
18711 +                                           "rfc3686(ctr(aes))))",
18712 +                               .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18713 +                                                  "rfc3686-ctr-aes-caam-qi2",
18714 +                               .cra_blocksize = 1,
18715 +                       },
18716 +                       .setkey = aead_setkey,
18717 +                       .setauthsize = aead_setauthsize,
18718 +                       .encrypt = aead_encrypt,
18719 +                       .decrypt = aead_decrypt,
18720 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18721 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18722 +               },
18723 +               .caam = {
18724 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18725 +                                          OP_ALG_AAI_CTR_MOD128,
18726 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18727 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18728 +                       .rfc3686 = true,
18729 +                       .geniv = true,
18730 +               },
18731 +       },
18732 +       {
18733 +               .aead = {
18734 +                       .base = {
18735 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
18736 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18737 +                               .cra_blocksize = AES_BLOCK_SIZE,
18738 +                       },
18739 +                       .setkey = tls_setkey,
18740 +                       .setauthsize = tls_setauthsize,
18741 +                       .encrypt = tls_encrypt,
18742 +                       .decrypt = tls_decrypt,
18743 +                       .ivsize = AES_BLOCK_SIZE,
18744 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18745 +               },
18746 +               .caam = {
18747 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18748 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18749 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18750 +               },
18751 +       },
18752 +};
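+
+/*
+ * Illustrative usage (editor's sketch, not part of the original patch):
+ * kernel clients reach the templates above through the generic AEAD API.
+ * The key/keylen values below are placeholders supplied by the caller.
+ *
+ *	struct crypto_aead *tfm;
+ *
+ *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	crypto_aead_setkey(tfm, key, keylen);
+ *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
+ *
+ * The crypto core picks this driver over software implementations based
+ * on cra_priority once the DPAA2 instance has registered.
+ */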
18753 +
18754 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18755 +                                             *template)
18756 +{
18757 +       struct caam_crypto_alg *t_alg;
18758 +       struct crypto_alg *alg;
18759 +
18760 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18761 +       if (!t_alg)
18762 +               return ERR_PTR(-ENOMEM);
18763 +
18764 +       alg = &t_alg->crypto_alg;
18765 +
18766 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18767 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18768 +                template->driver_name);
18769 +       alg->cra_module = THIS_MODULE;
18770 +       alg->cra_exit = caam_cra_exit;
18771 +       alg->cra_priority = CAAM_CRA_PRIORITY;
18772 +       alg->cra_blocksize = template->blocksize;
18773 +       alg->cra_alignmask = 0;
18774 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
18775 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18776 +                        template->type;
18777 +       switch (template->type) {
18778 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
18779 +               alg->cra_init = caam_cra_init_ablkcipher;
18780 +               alg->cra_type = &crypto_givcipher_type;
18781 +               alg->cra_ablkcipher = template->template_ablkcipher;
18782 +               break;
18783 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
18784 +               alg->cra_init = caam_cra_init_ablkcipher;
18785 +               alg->cra_type = &crypto_ablkcipher_type;
18786 +               alg->cra_ablkcipher = template->template_ablkcipher;
18787 +               break;
18788 +       }
18789 +
18790 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
18791 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
18792 +
18793 +       return t_alg;
18794 +}
18795 +
18796 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18797 +{
18798 +       struct aead_alg *alg = &t_alg->aead;
18799 +
18800 +       alg->base.cra_module = THIS_MODULE;
18801 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
18802 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18803 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18804 +
18805 +       alg->init = caam_cra_init_aead;
18806 +       alg->exit = caam_cra_exit_aead;
18807 +}
18808 +
18809 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
18810 +{
18811 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18812 +
18813 +       ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
18814 +       napi_schedule_irqoff(&ppriv->napi);
18815 +}
18816 +
18817 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
18818 +{
18819 +       struct device *dev = priv->dev;
18820 +       struct dpaa2_io_notification_ctx *nctx;
18821 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18822 +       int err, i = 0, cpu;
18823 +
18824 +       for_each_online_cpu(cpu) {
18825 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18826 +               ppriv->priv = priv;
18827 +               nctx = &ppriv->nctx;
18828 +               nctx->is_cdan = 0;
18829 +               nctx->id = ppriv->rsp_fqid;
18830 +               nctx->desired_cpu = cpu;
18831 +               nctx->cb = dpaa2_caam_fqdan_cb;
18832 +
18833 +               /* Register notification callbacks */
18834 +               err = dpaa2_io_service_register(NULL, nctx);
18835 +               if (unlikely(err)) {
18836 +                       dev_err(dev, "notification register failed\n");
18837 +                       nctx->cb = NULL;
18838 +                       goto err;
18839 +               }
18840 +
18841 +               ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
18842 +                                                    dev);
18843 +               if (unlikely(!ppriv->store)) {
18844 +                       dev_err(dev, "dpaa2_io_store_create() failed\n");
18845 +                       goto err;
18846 +               }
18847 +
18848 +               if (++i == priv->num_pairs)
18849 +                       break;
18850 +       }
18851 +
18852 +       return 0;
18853 +
18854 +err:
18855 +       for_each_online_cpu(cpu) {
18856 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18857 +               if (!ppriv->nctx.cb)
18858 +                       break;
18859 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18860 +       }
18861 +
18862 +       for_each_online_cpu(cpu) {
18863 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18864 +               if (!ppriv->store)
18865 +                       break;
18866 +               dpaa2_io_store_destroy(ppriv->store);
18867 +       }
18868 +
18869 +       return err;
18870 +}
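+
+/*
+ * Note on the unwind above: the error path walks CPUs in setup order, so
+ * the first CPU with a NULL nctx.cb (cleared on registration failure) or
+ * a NULL store marks the point where setup stopped.
+ */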
18871 +
18872 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
18873 +{
18874 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18875 +       int i = 0, cpu;
18876 +
18877 +       for_each_online_cpu(cpu) {
18878 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18879 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18880 +               dpaa2_io_store_destroy(ppriv->store);
18881 +
18882 +               if (++i == priv->num_pairs)
18883 +                       return;
18884 +       }
18885 +}
18886 +
18887 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
18888 +{
18889 +       struct dpseci_rx_queue_cfg rx_queue_cfg;
18890 +       struct device *dev = priv->dev;
18891 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18892 +       struct dpaa2_caam_priv_per_cpu *ppriv;
18893 +       int err = 0, i = 0, cpu;
18894 +
18895 +       /* Configure Rx queues */
18896 +       for_each_online_cpu(cpu) {
18897 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
18898 +
18899 +               rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
18900 +                                      DPSECI_QUEUE_OPT_USER_CTX;
18901 +               rx_queue_cfg.order_preservation_en = 0;
18902 +               rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
18903 +               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
18904 +               /*
18905 +                * Rx priority (WQ) doesn't really matter, since we use
18906 +                * pull mode, i.e. volatile dequeues from specific FQs
18907 +                */
18908 +               rx_queue_cfg.dest_cfg.priority = 0;
18909 +               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
18910 +
18911 +               err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
18912 +                                         &rx_queue_cfg);
18913 +               if (err) {
18914 +                       dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
18915 +                               err);
18916 +                       return err;
18917 +               }
18918 +
18919 +               if (++i == priv->num_pairs)
18920 +                       break;
18921 +       }
18922 +
18923 +       return err;
18924 +}
18925 +
18926 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
18927 +{
18928 +       struct device *dev = priv->dev;
18929 +
18930 +       if (!priv->cscn_mem)
18931 +               return;
18932 +
18933 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
18934 +       kfree(priv->cscn_mem);
18935 +}
18936 +
18937 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
18938 +{
18939 +       struct device *dev = priv->dev;
18940 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18941 +
18942 +       dpaa2_dpseci_congestion_free(priv);
18943 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
18944 +}
18945 +
18946 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
18947 +                                 const struct dpaa2_fd *fd)
18948 +{
18949 +       struct caam_request *req;
18950 +       u32 fd_err;
18951 +
18952 +       if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
18953 +               dev_err(priv->dev, "Only Frame List FD format is supported!\n");
18954 +               return;
18955 +       }
18956 +
18957 +       fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
18958 +       if (unlikely(fd_err))
18959 +               dev_err(priv->dev, "FD error: %08x\n", fd_err);
18960 +
18961 +       /*
18962 +        * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
18963 +        * in FD[ERR] or FD[FRC].
18964 +        */
18965 +       req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
18966 +       dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
18967 +                        DMA_BIDIRECTIONAL);
18968 +       req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
18969 +}
18970 +
18971 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
18972 +{
18973 +       int err;
18974 +
18975 +       /* Retry while portal is busy */
18976 +       do {
18977 +               err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
18978 +                                              ppriv->store);
18979 +       } while (err == -EBUSY);
18980 +
18981 +       if (unlikely(err))
18982 +               dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
18983 +
18984 +       return err;
18985 +}
18986 +
18987 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
18988 +{
18989 +       struct dpaa2_dq *dq;
18990 +       int cleaned = 0, is_last;
18991 +
18992 +       do {
18993 +               dq = dpaa2_io_store_next(ppriv->store, &is_last);
18994 +               if (unlikely(!dq)) {
18995 +                       if (unlikely(!is_last)) {
18996 +                               dev_dbg(ppriv->priv->dev,
18997 +                                       "FQ %d returned no valid frames\n",
18998 +                                       ppriv->rsp_fqid);
18999 +                               /*
19000 +                                * MUST retry until we get some sort of
19001 +                                * valid response token (be it "empty dequeue"
19002 +                                * or a valid frame).
19003 +                                */
19004 +                               continue;
19005 +                       }
19006 +                       break;
19007 +               }
19008 +
19009 +               /* Process FD */
19010 +               dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
19011 +               cleaned++;
19012 +       } while (!is_last);
19013 +
19014 +       return cleaned;
19015 +}
19016 +
19017 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
19018 +{
19019 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19020 +       struct dpaa2_caam_priv *priv;
19021 +       int err, cleaned = 0, store_cleaned;
19022 +
19023 +       ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
19024 +       priv = ppriv->priv;
19025 +
19026 +       if (unlikely(dpaa2_caam_pull_fq(ppriv)))
19027 +               return 0;
19028 +
19029 +       do {
19030 +               store_cleaned = dpaa2_caam_store_consume(ppriv);
19031 +               cleaned += store_cleaned;
19032 +
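+               /*
+                * Stop if the last pull returned nothing, or if pulling
+                * another store (up to DPAA2_CAAM_STORE_SIZE frames) could
+                * overshoot the NAPI budget.
+                */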
19033 +               if (store_cleaned == 0 ||
19034 +                   cleaned > budget - DPAA2_CAAM_STORE_SIZE)
19035 +                       break;
19036 +
19037 +               /* Try to dequeue some more */
19038 +               err = dpaa2_caam_pull_fq(ppriv);
19039 +               if (unlikely(err))
19040 +                       break;
19041 +       } while (1);
19042 +
19043 +       if (cleaned < budget) {
19044 +               napi_complete_done(napi, cleaned);
19045 +               err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
19046 +               if (unlikely(err))
19047 +                       dev_err(priv->dev, "Notification rearm failed: %d\n",
19048 +                               err);
19049 +       }
19050 +
19051 +       return cleaned;
19052 +}
19053 +
19054 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
19055 +                                        u16 token)
19056 +{
19057 +       struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
19058 +       struct device *dev = priv->dev;
19059 +       int err;
19060 +
19061 +       /*
19062 +        * The congestion group feature is supported starting with DPSECI API
19063 +        * v5.1 and only when the object has been created with this capability.
19064 +        */
19065 +       if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
19066 +           !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
19067 +               return 0;
19068 +
19069 +       priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
19070 +                                GFP_KERNEL | GFP_DMA);
19071 +       if (!priv->cscn_mem)
19072 +               return -ENOMEM;
19073 +
19074 +       priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
19075 +       priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
19076 +                                       DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19077 +       if (dma_mapping_error(dev, priv->cscn_dma)) {
19078 +               dev_err(dev, "Error mapping CSCN memory area\n");
19079 +               err = -ENOMEM;
19080 +               goto err_dma_map;
19081 +       }
19082 +
19083 +       cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
19084 +       cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
19085 +       cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
19086 +       cong_notif_cfg.message_ctx = (u64)priv;
19087 +       cong_notif_cfg.message_iova = priv->cscn_dma;
19088 +       cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
19089 +                                       DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
19090 +                                       DPSECI_CGN_MODE_COHERENT_WRITE;
19091 +
19092 +       err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
19093 +                                                &cong_notif_cfg);
19094 +       if (err) {
19095 +               dev_err(dev, "dpseci_set_congestion_notification failed\n");
19096 +               goto err_set_cong;
19097 +       }
19098 +
19099 +       return 0;
19100 +
19101 +err_set_cong:
19102 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19103 +err_dma_map:
19104 +       kfree(priv->cscn_mem);
19105 +
19106 +       return err;
19107 +}
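+
+/*
+ * Note on the CSCN buffer set up above: kzalloc() does not guarantee
+ * DPAA2_CSCN_ALIGN alignment, so the region is over-allocated by one
+ * alignment unit and only the PTR_ALIGN()'ed address is DMA-mapped and
+ * advertised to QMan via message_iova.
+ */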
19108 +
19109 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
19110 +{
19111 +       struct device *dev = &ls_dev->dev;
19112 +       struct dpaa2_caam_priv *priv;
19113 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19114 +       int err, cpu;
19115 +       u8 i;
19116 +
19117 +       priv = dev_get_drvdata(dev);
19118 +
19119 +       priv->dev = dev;
19120 +       priv->dpsec_id = ls_dev->obj_desc.id;
19121 +
19122 +       /* Get a handle for the DPSECI this interface is associated with */
19123 +       err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
19124 +       if (err) {
19125 +               dev_err(dev, "dpseci_open() failed: %d\n", err);
19126 +               goto err_open;
19127 +       }
19128 +
19129 +       dev_info(dev, "Opened dpseci object successfully\n");
19130 +
19131 +       err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
19132 +                                    &priv->minor_ver);
19133 +       if (err) {
19134 +               dev_err(dev, "dpseci_get_api_version() failed\n");
19135 +               goto err_get_vers;
19136 +       }
19137 +
19138 +       err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
19139 +                                   &priv->dpseci_attr);
19140 +       if (err) {
19141 +               dev_err(dev, "dpseci_get_attributes() failed\n");
19142 +               goto err_get_vers;
19143 +       }
19144 +
19145 +       err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
19146 +                                 &priv->sec_attr);
19147 +       if (err) {
19148 +               dev_err(dev, "dpseci_get_sec_attr() failed\n");
19149 +               goto err_get_vers;
19150 +       }
19151 +
19152 +       err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
19153 +       if (err) {
19154 +               dev_err(dev, "dpaa2_dpseci_congestion_setup() failed\n");
19155 +               goto err_get_vers;
19156 +       }
19157 +
19158 +       priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
19159 +                             priv->dpseci_attr.num_tx_queues);
19160 +       if (priv->num_pairs > num_online_cpus()) {
19161 +               dev_warn(dev, "%d queues won't be used\n",
19162 +                        priv->num_pairs - num_online_cpus());
19163 +               priv->num_pairs = num_online_cpus();
19164 +       }
19165 +
19166 +       for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
19167 +               err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19168 +                                         &priv->rx_queue_attr[i]);
19169 +               if (err) {
19170 +                       dev_err(dev, "dpseci_get_rx_queue() failed\n");
19171 +                       goto err_get_rx_queue;
19172 +               }
19173 +       }
19174 +
19175 +       for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
19176 +               err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19177 +                                         &priv->tx_queue_attr[i]);
19178 +               if (err) {
19179 +                       dev_err(dev, "dpseci_get_tx_queue() failed\n");
19180 +                       goto err_get_rx_queue;
19181 +               }
19182 +       }
19183 +
19184 +       i = 0;
19185 +       for_each_online_cpu(cpu) {
19186 +               dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
19187 +                        priv->rx_queue_attr[i].fqid,
19188 +                        priv->tx_queue_attr[i].fqid);
19189 +
19190 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
19191 +               ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
19192 +               ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
19193 +               ppriv->prio = i;
19194 +
19195 +               ppriv->net_dev.dev = *dev;
19196 +               INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
19197 +               netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
19198 +                              DPAA2_CAAM_NAPI_WEIGHT);
19199 +               if (++i == priv->num_pairs)
19200 +                       break;
19201 +       }
19202 +
19203 +       return 0;
19204 +
19205 +err_get_rx_queue:
19206 +       dpaa2_dpseci_congestion_free(priv);
19207 +err_get_vers:
19208 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
19209 +err_open:
19210 +       return err;
19211 +}
19212 +
19213 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
19214 +{
19215 +       struct device *dev = priv->dev;
19216 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19217 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19218 +       int err, i;
19219 +
19220 +       for (i = 0; i < priv->num_pairs; i++) {
19221 +               ppriv = per_cpu_ptr(priv->ppriv, i);
19222 +               napi_enable(&ppriv->napi);
19223 +       }
19224 +
19225 +       err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
19226 +       if (err) {
19227 +               dev_err(dev, "dpseci_enable() failed\n");
19228 +               return err;
19229 +       }
19230 +
19231 +       dev_info(dev, "DPSECI version %d.%d\n",
19232 +                priv->major_ver,
19233 +                priv->minor_ver);
19234 +
19235 +       return 0;
19236 +}
19237 +
19238 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
19239 +{
19240 +       struct device *dev = priv->dev;
19241 +       struct dpaa2_caam_priv_per_cpu *ppriv;
19242 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19243 +       int i, err = 0, enabled;
19244 +
19245 +       err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
19246 +       if (err) {
19247 +               dev_err(dev, "dpseci_disable() failed\n");
19248 +               return err;
19249 +       }
19250 +
19251 +       err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
19252 +       if (err) {
19253 +               dev_err(dev, "dpseci_is_enabled() failed\n");
19254 +               return err;
19255 +       }
19256 +
19257 +       dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
19258 +
19259 +       for (i = 0; i < priv->num_pairs; i++) {
19260 +               ppriv = per_cpu_ptr(priv->ppriv, i);
19261 +               napi_disable(&ppriv->napi);
19262 +               netif_napi_del(&ppriv->napi);
19263 +       }
19264 +
19265 +       return 0;
19266 +}
19267 +
19268 +static struct list_head alg_list;
19269 +
19270 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
19271 +{
19272 +       struct device *dev;
19273 +       struct dpaa2_caam_priv *priv;
19274 +       int i, err = 0;
19275 +       bool registered = false;
19276 +
19277 +       /*
19278 +        * There is no way to get CAAM endianness - there is no direct register
19279 +        * space access and MC f/w does not provide this attribute.
19280 +        * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
19281 +        * property.
19282 +        */
19283 +       caam_little_end = true;
19284 +
19285 +       caam_imx = false;
19286 +
19287 +       dev = &dpseci_dev->dev;
19288 +
19289 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
19290 +       if (!priv)
19291 +               return -ENOMEM;
19292 +
19293 +       dev_set_drvdata(dev, priv);
19294 +
19295 +       priv->domain = iommu_get_domain_for_dev(dev);
19296 +
19297 +       qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
19298 +                                    0, SLAB_CACHE_DMA, NULL);
19299 +       if (!qi_cache) {
19300 +               dev_err(dev, "Can't allocate SEC cache\n");
19301 +               err = -ENOMEM;
19302 +               goto err_qicache;
19303 +       }
19304 +
19305 +       err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
19306 +       if (err) {
19307 +               dev_err(dev, "dma_set_mask_and_coherent() failed\n");
19308 +               goto err_dma_mask;
19309 +       }
19310 +
19311 +       /* Obtain a MC portal */
19312 +       err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
19313 +       if (err) {
19314 +               dev_err(dev, "MC portal allocation failed\n");
19315 +               goto err_dma_mask;
19316 +       }
19317 +
19318 +       priv->ppriv = alloc_percpu(*priv->ppriv);
19319 +       if (!priv->ppriv) {
19320 +               dev_err(dev, "alloc_percpu() failed\n");
+               err = -ENOMEM;
19321 +               goto err_alloc_ppriv;
19322 +       }
19323 +
19324 +       /* DPSECI initialization */
19325 +       err = dpaa2_dpseci_setup(dpseci_dev);
19326 +       if (err < 0) {
19327 +               dev_err(dev, "dpaa2_dpseci_setup() failed\n");
19328 +               goto err_dpseci_setup;
19329 +       }
19330 +
19331 +       /* DPIO */
19332 +       err = dpaa2_dpseci_dpio_setup(priv);
19333 +       if (err) {
19334 +               dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
19335 +               goto err_dpio_setup;
19336 +       }
19337 +
19338 +       /* DPSECI binding to DPIO */
19339 +       err = dpaa2_dpseci_bind(priv);
19340 +       if (err) {
19341 +               dev_err(dev, "dpaa2_dpseci_bind() failed\n");
19342 +               goto err_bind;
19343 +       }
19344 +
19345 +       /* DPSECI enable */
19346 +       err = dpaa2_dpseci_enable(priv);
19347 +       if (err) {
19348 +               dev_err(dev, "dpaa2_dpseci_enable() failed\n");
19349 +               goto err_bind;
19350 +       }
19351 +
19352 +       /* register crypto algorithms the device supports */
19353 +       INIT_LIST_HEAD(&alg_list);
19354 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
19355 +               struct caam_crypto_alg *t_alg;
19356 +               struct caam_alg_template *alg = driver_algs + i;
19357 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
19358 +
19359 +               /* Skip DES algorithms if not supported by device */
19360 +               if (!priv->sec_attr.des_acc_num &&
19361 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
19362 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
19363 +                       continue;
19364 +
19365 +               /* Skip AES algorithms if not supported by device */
19366 +               if (!priv->sec_attr.aes_acc_num &&
19367 +                   (alg_sel == OP_ALG_ALGSEL_AES))
19368 +                       continue;
19369 +
19370 +               t_alg = caam_alg_alloc(alg);
19371 +               if (IS_ERR(t_alg)) {
19372 +                       err = PTR_ERR(t_alg);
19373 +                       dev_warn(dev, "%s alg allocation failed: %d\n",
19374 +                                alg->driver_name, err);
19375 +                       continue;
19376 +               }
19377 +               t_alg->caam.dev = dev;
19378 +
19379 +               err = crypto_register_alg(&t_alg->crypto_alg);
19380 +               if (err) {
19381 +                       dev_warn(dev, "%s alg registration failed: %d\n",
19382 +                                t_alg->crypto_alg.cra_driver_name, err);
19383 +                       kfree(t_alg);
19384 +                       continue;
19385 +               }
19386 +
19387 +               list_add_tail(&t_alg->entry, &alg_list);
19388 +               registered = true;
19389 +       }
19390 +
19391 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19392 +               struct caam_aead_alg *t_alg = driver_aeads + i;
19393 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
19394 +                                OP_ALG_ALGSEL_MASK;
19395 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
19396 +                                OP_ALG_ALGSEL_MASK;
19397 +
19398 +               /* Skip DES algorithms if not supported by device */
19399 +               if (!priv->sec_attr.des_acc_num &&
19400 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
19401 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
19402 +                       continue;
19403 +
19404 +               /* Skip AES algorithms if not supported by device */
19405 +               if (!priv->sec_attr.aes_acc_num &&
19406 +                   (c1_alg_sel == OP_ALG_ALGSEL_AES))
19407 +                       continue;
19408 +
19409 +               /*
19410 +                * Skip algorithms requiring message digests
19411 +                * if MD not supported by device.
19412 +                */
19413 +               if (!priv->sec_attr.md_acc_num && c2_alg_sel)
19414 +                       continue;
19415 +
19416 +               t_alg->caam.dev = dev;
19417 +               caam_aead_alg_init(t_alg);
19418 +
19419 +               err = crypto_register_aead(&t_alg->aead);
19420 +               if (err) {
19421 +                       dev_warn(dev, "%s alg registration failed: %d\n",
19422 +                                t_alg->aead.base.cra_driver_name, err);
19423 +                       continue;
19424 +               }
19425 +
19426 +               t_alg->registered = true;
19427 +               registered = true;
19428 +       }
19429 +       if (registered)
19430 +               dev_info(dev, "algorithms registered in /proc/crypto\n");
19431 +
19432 +       return err;
19433 +
19434 +err_bind:
19435 +       dpaa2_dpseci_dpio_free(priv);
19436 +err_dpio_setup:
19437 +       dpaa2_dpseci_free(priv);
19438 +err_dpseci_setup:
19439 +       free_percpu(priv->ppriv);
19440 +err_alloc_ppriv:
19441 +       fsl_mc_portal_free(priv->mc_io);
19442 +err_dma_mask:
19443 +       kmem_cache_destroy(qi_cache);
19444 +err_qicache:
19445 +       dev_set_drvdata(dev, NULL);
19446 +
19447 +       return err;
19448 +}
19449 +
19450 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
19451 +{
19452 +       struct device *dev;
19453 +       struct dpaa2_caam_priv *priv;
19454 +       int i;
19455 +
19456 +       dev = &ls_dev->dev;
19457 +       priv = dev_get_drvdata(dev);
19458 +
19459 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19460 +               struct caam_aead_alg *t_alg = driver_aeads + i;
19461 +
19462 +               if (t_alg->registered)
19463 +                       crypto_unregister_aead(&t_alg->aead);
19464 +       }
19465 +
19466 +       if (alg_list.next) {
19467 +               struct caam_crypto_alg *t_alg, *n;
19468 +
19469 +               list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
19470 +                       crypto_unregister_alg(&t_alg->crypto_alg);
19471 +                       list_del(&t_alg->entry);
19472 +                       kfree(t_alg);
19473 +               }
19474 +       }
19475 +
19476 +       dpaa2_dpseci_disable(priv);
19477 +       dpaa2_dpseci_dpio_free(priv);
19478 +       dpaa2_dpseci_free(priv);
19479 +       free_percpu(priv->ppriv);
19480 +       fsl_mc_portal_free(priv->mc_io);
19481 +       dev_set_drvdata(dev, NULL);
19482 +       kmem_cache_destroy(qi_cache);
19483 +
19484 +       return 0;
19485 +}
19486 +
19487 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
19488 +{
19489 +       struct dpaa2_fd fd;
19490 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
19491 +       int err = 0, i, id;
19492 +
19493 +       if (IS_ERR(req))
19494 +               return PTR_ERR(req);
19495 +
19496 +       if (priv->cscn_mem) {
19497 +               dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
19498 +                                       DPAA2_CSCN_SIZE,
19499 +                                       DMA_FROM_DEVICE);
19500 +               if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
19501 +                       dev_dbg_ratelimited(dev, "Dropping request\n");
19502 +                       return -EBUSY;
19503 +               }
19504 +       }
19505 +
19506 +       dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
19507 +
19508 +       req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
19509 +                                        DMA_BIDIRECTIONAL);
19510 +       if (dma_mapping_error(dev, req->fd_flt_dma)) {
19511 +               dev_err(dev, "DMA mapping error for QI enqueue request\n");
19512 +               goto err_out;
19513 +       }
19514 +
19515 +       memset(&fd, 0, sizeof(fd));
19516 +       dpaa2_fd_set_format(&fd, dpaa2_fd_list);
19517 +       dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
19518 +       dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
19519 +       dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
19520 +
19521 +       /*
19522 +        * There is no guarantee that preemption is disabled here,
19523 +        * thus take action.
19524 +        */
19525 +       preempt_disable();
19526 +       id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
19527 +       for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
19528 +               err = dpaa2_io_service_enqueue_fq(NULL,
19529 +                                                 priv->tx_queue_attr[id].fqid,
19530 +                                                 &fd);
19531 +               if (err != -EBUSY)
19532 +                       break;
19533 +       }
19534 +       preempt_enable();
19535 +
19536 +       if (unlikely(err < 0)) {
19537 +               dev_err(dev, "Error enqueuing frame: %d\n", err);
19538 +               goto err_out;
19539 +       }
19540 +
19541 +       return -EINPROGRESS;
19542 +
19543 +err_out:
19544 +       dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
19545 +                        DMA_BIDIRECTIONAL);
19546 +       return -EIO;
19547 +}
19548 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
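+
+/*
+ * Return convention of dpaa2_caam_enqueue(): -EINPROGRESS means the frame
+ * was accepted and req->cbk() will run on completion; -EBUSY signals a
+ * congestion-based drop; -EIO covers DMA-mapping and enqueue failures.
+ */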
19549 +
19550 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
19551 +       {
19552 +               .vendor = FSL_MC_VENDOR_FREESCALE,
19553 +               .obj_type = "dpseci",
19554 +       },
19555 +       { .vendor = 0x0 }
19556 +};
19557 +
19558 +static struct fsl_mc_driver dpaa2_caam_driver = {
19559 +       .driver = {
19560 +               .name           = KBUILD_MODNAME,
19561 +               .owner          = THIS_MODULE,
19562 +       },
19563 +       .probe          = dpaa2_caam_probe,
19564 +       .remove         = dpaa2_caam_remove,
19565 +       .match_id_table = dpaa2_caam_match_id_table
19566 +};
19567 +
19568 +MODULE_LICENSE("Dual BSD/GPL");
19569 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
19570 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
19571 +
19572 +module_fsl_mc_driver(dpaa2_caam_driver);
19573 --- /dev/null
19574 +++ b/drivers/crypto/caam/caamalg_qi2.h
19575 @@ -0,0 +1,265 @@
19576 +/*
19577 + * Copyright 2015-2016 Freescale Semiconductor Inc.
19578 + * Copyright 2017 NXP
19579 + *
19580 + * Redistribution and use in source and binary forms, with or without
19581 + * modification, are permitted provided that the following conditions are met:
19582 + *     * Redistributions of source code must retain the above copyright
19583 + *      notice, this list of conditions and the following disclaimer.
19584 + *     * Redistributions in binary form must reproduce the above copyright
19585 + *      notice, this list of conditions and the following disclaimer in the
19586 + *      documentation and/or other materials provided with the distribution.
19587 + *     * Neither the names of the above-listed copyright holders nor the
19588 + *      names of any contributors may be used to endorse or promote products
19589 + *      derived from this software without specific prior written permission.
19590 + *
19591 + *
19592 + * ALTERNATIVELY, this software may be distributed under the terms of the
19593 + * GNU General Public License ("GPL") as published by the Free Software
19594 + * Foundation, either version 2 of that License or (at your option) any
19595 + * later version.
19596 + *
19597 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19598 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19599 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19600 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19601 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19602 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19603 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19604 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19605 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19606 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19607 + * POSSIBILITY OF SUCH DAMAGE.
19608 + */
19609 +
19610 +#ifndef _CAAMALG_QI2_H_
19611 +#define _CAAMALG_QI2_H_
19612 +
19613 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
19614 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
19615 +#include <linux/threads.h>
19616 +#include "dpseci.h"
19617 +#include "desc_constr.h"
19618 +
19619 +#define DPAA2_CAAM_STORE_SIZE  16
19620 +/* NAPI weight *must* be a multiple of the store size. */
19621 +#define DPAA2_CAAM_NAPI_WEIGHT 64
19622 +
19623 +/* The congestion entrance threshold was chosen so that on LS2088
19624 + * we support the maximum throughput for the available memory
19625 + */
19626 +#define DPAA2_SEC_CONG_ENTRY_THRESH    (128 * 1024 * 1024)
19627 +#define DPAA2_SEC_CONG_EXIT_THRESH     (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
19628 +
19629 +/**
19630 + * dpaa2_caam_priv - driver private data
19631 + * @dpsec_id: DPSECI object unique ID
19632 + * @major_ver: DPSECI major version
19633 + * @minor_ver: DPSECI minor version
19634 + * @dpseci_attr: DPSECI attributes
19635 + * @sec_attr: SEC engine attributes
19636 + * @rx_queue_attr: array of Rx queue attributes
19637 + * @tx_queue_attr: array of Tx queue attributes
19638 + * @cscn_mem: pointer to memory region containing the
19639 + *     dpaa2_cscn struct; its size is larger than
19640 + *     sizeof(struct dpaa2_cscn) to accommodate alignment
19641 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
19642 + *     as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
19643 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
19644 + * @dev: device associated with the DPSECI object
19645 + * @mc_io: pointer to MC portal's I/O object
19646 + * @domain: IOMMU domain
19647 + * @ppriv: per CPU pointers to private data
19648 + */
19649 +struct dpaa2_caam_priv {
19650 +       int dpsec_id;
19651 +
19652 +       u16 major_ver;
19653 +       u16 minor_ver;
19654 +
19655 +       struct dpseci_attr dpseci_attr;
19656 +       struct dpseci_sec_attr sec_attr;
19657 +       struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
19658 +       struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
19659 +       int num_pairs;
19660 +
19661 +       /* congestion */
19662 +       void *cscn_mem;
19663 +       void *cscn_mem_aligned;
19664 +       dma_addr_t cscn_dma;
19665 +
19666 +       struct device *dev;
19667 +       struct fsl_mc_io *mc_io;
19668 +       struct iommu_domain *domain;
19669 +
19670 +       struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
19671 +};
19672 +
19673 +/**
19674 + * dpaa2_caam_priv_per_cpu - per CPU private data
19675 + * @napi: napi structure
19676 + * @net_dev: netdev used by napi
19677 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
19678 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
19679 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
19680 + * @nctx: notification context of response FQ
19681 + * @store: where dequeued frames are stored
19682 + * @priv: backpointer to dpaa2_caam_priv
19683 + */
19684 +struct dpaa2_caam_priv_per_cpu {
19685 +       struct napi_struct napi;
19686 +       struct net_device net_dev;
19687 +       int req_fqid;
19688 +       int rsp_fqid;
19689 +       int prio;
19690 +       struct dpaa2_io_notification_ctx nctx;
19691 +       struct dpaa2_io_store *store;
19692 +       struct dpaa2_caam_priv *priv;
19693 +};
19694 +
19695 +/*
19696 + * The CAAM QI hardware constructs a job descriptor which points
19697 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
19698 + * When the job descriptor is executed by the DECO, the whole job
19699 + * descriptor together with the shared descriptor gets loaded into
19700 + * the DECO buffer, which is 64 words long (each word 32 bits wide).
19701 + *
19702 + * The job descriptor constructed by QI hardware has layout:
19703 + *
19704 + *     HEADER          (1 word)
19705 + *     Shdesc ptr      (1 or 2 words)
19706 + *     SEQ_OUT_PTR     (1 word)
19707 + *     Out ptr         (1 or 2 words)
19708 + *     Out length      (1 word)
19709 + *     SEQ_IN_PTR      (1 word)
19710 + *     In ptr          (1 or 2 words)
19711 + *     In length       (1 word)
19712 + *
19713 + * The shdesc ptr is used to fetch shared descriptor contents
19714 + * into deco buffer.
19715 + *
19716 + * Apart from the shdesc contents, the total number of words that
19717 + * get loaded in the DECO buffer is 8 or 11. The remaining words
19718 + * in the DECO buffer can be used for storing the shared descriptor.
19719 + */
19720 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
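+
+/*
+ * Worked example (editor's sketch, assuming the usual CAAM constants and
+ * 64-bit pointers): CAAM_CMD_SZ is 4 bytes and CAAM_DESC_BYTES_MAX is
+ * 64 words = 256 bytes. The three pointers above take 2 words each, so
+ * DESC_JOB_IO_LEN covers the 11 fixed words (44 bytes), leaving
+ * MAX_SDLEN = (256 - 44) / 4 = 53 words for the shared descriptor.
+ */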
19721 +
19722 +/* Length of a single buffer in the QI driver memory cache */
19723 +#define CAAM_QI_MEMCACHE_SIZE  512
19724 +
19725 +/*
19726 + * aead_edesc - s/w-extended aead descriptor
19727 + * @src_nents: number of segments in input scatterlist
19728 + * @dst_nents: number of segments in output scatterlist
19729 + * @iv_dma: dma address of iv for checking continuity and link table
19730 + * @qm_sg_bytes: length of dma mapped h/w link table
19731 + * @qm_sg_dma: bus physical mapped address of h/w link table
19732 + * @assoclen_dma: bus physical mapped address of req->assoclen
19733 + * @sgt: the h/w link table
19734 + */
19735 +struct aead_edesc {
19736 +       int src_nents;
19737 +       int dst_nents;
19738 +       dma_addr_t iv_dma;
19739 +       int qm_sg_bytes;
19740 +       dma_addr_t qm_sg_dma;
19741 +       dma_addr_t assoclen_dma;
19742 +#define CAAM_QI_MAX_AEAD_SG                                            \
19743 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
19744 +        sizeof(struct dpaa2_sg_entry))
19745 +       struct dpaa2_sg_entry sgt[0];
19746 +};
19747 +
19748 +/*
19749 + * tls_edesc - s/w-extended tls descriptor
19750 + * @src_nents: number of segments in input scatterlist
19751 + * @dst_nents: number of segments in output scatterlist
19752 + * @iv_dma: dma address of iv for checking continuity and link table
19753 + * @qm_sg_bytes: length of dma mapped h/w link table
19754 + * @qm_sg_dma: bus physical mapped address of h/w link table
19755 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
19756 + * @dst: pointer to output scatterlist, useful for unmapping
19757 + * @sgt: the h/w link table
19758 + */
19759 +struct tls_edesc {
19760 +       int src_nents;
19761 +       int dst_nents;
19762 +       dma_addr_t iv_dma;
19763 +       int qm_sg_bytes;
19764 +       dma_addr_t qm_sg_dma;
19765 +       struct scatterlist tmp[2];
19766 +       struct scatterlist *dst;
19767 +       struct dpaa2_sg_entry sgt[0];
19768 +};
19769 +
19770 +/*
19771 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
19772 + * @src_nents: number of segments in input scatterlist
19773 + * @dst_nents: number of segments in output scatterlist
19774 + * @iv_dma: dma address of iv for checking continuity and link table
19775 + * @qm_sg_bytes: length of dma mapped qm_sg space
19776 + * @qm_sg_dma: I/O virtual address of h/w link table
19777 + * @sgt: the h/w link table
19778 + */
19779 +struct ablkcipher_edesc {
19780 +       int src_nents;
19781 +       int dst_nents;
19782 +       dma_addr_t iv_dma;
19783 +       int qm_sg_bytes;
19784 +       dma_addr_t qm_sg_dma;
19785 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
19786 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
19787 +        sizeof(struct dpaa2_sg_entry))
19788 +       struct dpaa2_sg_entry sgt[0];
19789 +};
19790 +
19791 +/**
19792 + * caam_flc - Flow Context (FLC)
19793 + * @flc: Flow Context options
19794 + * @sh_desc: Shared Descriptor
19795 + * @flc_dma: DMA address of the Flow Context
19796 + */
19797 +struct caam_flc {
19798 +       u32 flc[16];
19799 +       u32 sh_desc[MAX_SDLEN];
19800 +       dma_addr_t flc_dma;
19801 +} ____cacheline_aligned;
19802 +
19803 +enum optype {
19804 +       ENCRYPT = 0,
19805 +       DECRYPT,
19806 +       GIVENCRYPT,
19807 +       NUM_OP
19808 +};
19809 +
19810 +/**
19811 + * caam_request - the request structure the driver application should fill while
19812 + *                submitting a job to the driver.
19813 + * @fd_flt: Frame list table defining input and output
19814 + *          fd_flt[0] - FLE pointing to output buffer
19815 + *          fd_flt[1] - FLE pointing to input buffer
19816 + * @fd_flt_dma: DMA address for the frame list table
19817 + * @flc: Flow Context
19818 + * @op_type: operation type
19819 + * @cbk: Callback function to invoke when job is completed
19820 + * @ctx: arbitrary context attached to the request by the application
19821 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
19822 + */
19823 +struct caam_request {
19824 +       struct dpaa2_fl_entry fd_flt[2];
19825 +       dma_addr_t fd_flt_dma;
19826 +       struct caam_flc *flc;
19827 +       enum optype op_type;
19828 +       void (*cbk)(void *ctx, u32 err);
19829 +       void *ctx;
19830 +       void *edesc;
19831 +};
19832 +
19833 +/**
19834 + * dpaa2_caam_enqueue() - enqueue a crypto request
19835 + * @dev: device associated with the DPSECI object
19836 + * @req: pointer to caam_request
19837 + */
19838 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
19839 +
19840 +#endif /* _CAAMALG_QI2_H_ */
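To see how the pieces of this header fit together, here is a minimal caller sketch. It is illustrative only: my_done() and my_submit() are hypothetical names, the frame-list entries and their DMA mapping are assumed to be set up elsewhere, and only the struct caam_request fields documented above are touched.

	/* Hypothetical usage sketch, not part of the driver. */
	static void my_done(void *ctx, u32 err)
	{
		/*
		 * Runs from the response FQ: unmap DMA, free the edesc,
		 * complete the crypto API request, ...
		 */
	}

	static int my_submit(struct device *dev, struct caam_request *req,
			     struct caam_flc *flc, void *edesc)
	{
		/*
		 * req->fd_flt[0]/[1] must already describe the output and
		 * input buffers, with req->fd_flt_dma mapping the table.
		 */
		req->flc = flc;		/* flow context holding the shdesc */
		req->op_type = ENCRYPT;
		req->cbk = my_done;
		req->ctx = edesc;	/* handed back to my_done() */
		req->edesc = edesc;

		return dpaa2_caam_enqueue(dev, req);
	}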
19841 --- a/drivers/crypto/caam/caamhash.c
19842 +++ b/drivers/crypto/caam/caamhash.c
19843 @@ -72,7 +72,7 @@
19844  #define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
19845  
19846  /* length of descriptors text */
19847 -#define DESC_AHASH_BASE                        (4 * CAAM_CMD_SZ)
19848 +#define DESC_AHASH_BASE                        (3 * CAAM_CMD_SZ)
19849  #define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
19850  #define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
19851  #define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
19852 @@ -103,20 +103,14 @@ struct caam_hash_ctx {
19853         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19854         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19855         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19856 -       u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19857         dma_addr_t sh_desc_update_dma ____cacheline_aligned;
19858         dma_addr_t sh_desc_update_first_dma;
19859         dma_addr_t sh_desc_fin_dma;
19860         dma_addr_t sh_desc_digest_dma;
19861 -       dma_addr_t sh_desc_finup_dma;
19862         struct device *jrdev;
19863 -       u32 alg_type;
19864 -       u32 alg_op;
19865         u8 key[CAAM_MAX_HASH_KEY_SIZE];
19866 -       dma_addr_t key_dma;
19867         int ctx_len;
19868 -       unsigned int split_key_len;
19869 -       unsigned int split_key_pad_len;
19870 +       struct alginfo adata;
19871  };
19872  
19873  /* ahash state */
19874 @@ -143,6 +137,31 @@ struct caam_export_state {
19875         int (*finup)(struct ahash_request *req);
19876  };
19877  
19878 +static inline void switch_buf(struct caam_hash_state *state)
19879 +{
19880 +       state->current_buf ^= 1;
19881 +}
19882 +
19883 +static inline u8 *current_buf(struct caam_hash_state *state)
19884 +{
19885 +       return state->current_buf ? state->buf_1 : state->buf_0;
19886 +}
19887 +
19888 +static inline u8 *alt_buf(struct caam_hash_state *state)
19889 +{
19890 +       return state->current_buf ? state->buf_0 : state->buf_1;
19891 +}
19892 +
19893 +static inline int *current_buflen(struct caam_hash_state *state)
19894 +{
19895 +       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19896 +}
19897 +
19898 +static inline int *alt_buflen(struct caam_hash_state *state)
19899 +{
19900 +       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19901 +}
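These helpers replace the open-coded "state->current_buf ? ... : ..." ternaries that the hunks below delete. The scheme is a simple ping-pong: bytes that cannot be hashed yet live in the current buffer, the next partial block is staged in the alternate one, and switch_buf() flips the roles once a job has consumed the current buffer. A hypothetical fragment of the calling pattern (offset and rem are placeholders, not names from this file):

	/* stage the trailing partial block for the next update */
	scatterwalk_map_and_copy(alt_buf(state), req->src, offset, rem, 0);
	*alt_buflen(state) = rem;

	/* ... enqueue a job that hashes current_buf(state) ... */

	switch_buf(state);	/* the staged buffer becomes current */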
19902 +
19903  /* Common job descriptor seq in/out ptr routines */
19904  
19905  /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
19906 @@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
19907         return dst_dma;
19908  }
19909  
19910 -/* Map current buffer in state and put it in link table */
19911 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
19912 -                                           struct sec4_sg_entry *sec4_sg,
19913 -                                           u8 *buf, int buflen)
19914 +/* Map current buffer in state (if length > 0) and put it in link table */
19915 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
19916 +                                    struct sec4_sg_entry *sec4_sg,
19917 +                                    struct caam_hash_state *state)
19918  {
19919 -       dma_addr_t buf_dma;
19920 +       int buflen = *current_buflen(state);
19921  
19922 -       buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
19923 -       dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
19924 +       if (!buflen)
19925 +               return 0;
19926  
19927 -       return buf_dma;
19928 -}
19929 +       state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
19930 +                                       DMA_TO_DEVICE);
19931 +       if (dma_mapping_error(jrdev, state->buf_dma)) {
19932 +               dev_err(jrdev, "unable to map buf\n");
19933 +               state->buf_dma = 0;
19934 +               return -ENOMEM;
19935 +       }
19936  
19937 -/*
19938 - * Only put buffer in link table if it contains data, which is possible,
19939 - * since a buffer has previously been used, and needs to be unmapped,
19940 - */
19941 -static inline dma_addr_t
19942 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
19943 -                      u8 *buf, dma_addr_t buf_dma, int buflen,
19944 -                      int last_buflen)
19945 -{
19946 -       if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
19947 -               dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
19948 -       if (buflen)
19949 -               buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
19950 -       else
19951 -               buf_dma = 0;
19952 +       dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
19953  
19954 -       return buf_dma;
19955 +       return 0;
19956  }
19957  
19958  /* Map state->caam_ctx, and add it to link table */
19959 @@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
19960         return 0;
19961  }
19962  
19963 -/* Common shared descriptor commands */
19964 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19965 -{
19966 -       append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
19967 -                         ctx->split_key_len, CLASS_2 |
19968 -                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
19969 -}
19970 -
19971 -/* Append key if it has been set */
19972 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19973 -{
19974 -       u32 *key_jump_cmd;
19975 -
19976 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
19977 -
19978 -       if (ctx->split_key_len) {
19979 -               /* Skip if already shared */
19980 -               key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
19981 -                                          JUMP_COND_SHRD);
19982 -
19983 -               append_key_ahash(desc, ctx);
19984 -
19985 -               set_jump_tgt_here(desc, key_jump_cmd);
19986 -       }
19987 -
19988 -       /* Propagate errors from shared to job descriptor */
19989 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
19990 -}
19991 -
19992  /*
19993 - * For ahash read data from seqin following state->caam_ctx,
19994 - * and write resulting class2 context to seqout, which may be state->caam_ctx
19995 - * or req->result
19996 + * For ahash update, final and finup (import_ctx = true)
19997 + *     import context, read and write to seqout
19998 + * For ahash first and digest (import_ctx = false)
19999 + *     read and write to seqout
20000   */
20001 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
20002 +static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
20003 +                                    struct caam_hash_ctx *ctx, bool import_ctx)
20004  {
20005 -       /* Calculate remaining bytes to read */
20006 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20007 +       u32 op = ctx->adata.algtype;
20008 +       u32 *skip_key_load;
20009  
20010 -       /* Read remaining bytes */
20011 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20012 -                            FIFOLD_TYPE_MSG | KEY_VLF);
20013 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
20014  
20015 -       /* Store class2 context bytes */
20016 -       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20017 -                        LDST_SRCDST_BYTE_CONTEXT);
20018 -}
20019 +       /* Append key if it has been set; ahash update excluded */
20020 +       if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
20021 +               /* Skip key loading if already shared */
20022 +               skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20023 +                                           JUMP_COND_SHRD);
20024  
20025 -/*
20026 - * For ahash update, final and finup, import context, read and write to seqout
20027 - */
20028 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
20029 -                                        int digestsize,
20030 -                                        struct caam_hash_ctx *ctx)
20031 -{
20032 -       init_sh_desc_key_ahash(desc, ctx);
20033 -
20034 -       /* Import context from software */
20035 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20036 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
20037 +               append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
20038 +                                 ctx->adata.keylen, CLASS_2 |
20039 +                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
20040  
20041 -       /* Class 2 operation */
20042 -       append_operation(desc, op | state | OP_ALG_ENCRYPT);
20043 +               set_jump_tgt_here(desc, skip_key_load);
20044  
20045 -       /*
20046 -        * Load from buf and/or src and write to req->result or state->context
20047 -        */
20048 -       ahash_append_load_str(desc, digestsize);
20049 -}
20050 +               op |= OP_ALG_AAI_HMAC_PRECOMP;
20051 +       }
20052  
20053 -/* For ahash firsts and digest, read and write to seqout */
20054 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
20055 -                                    int digestsize, struct caam_hash_ctx *ctx)
20056 -{
20057 -       init_sh_desc_key_ahash(desc, ctx);
20058 +       /* If needed, import context from software */
20059 +       if (import_ctx)
20060 +               append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
20061 +                               LDST_SRCDST_BYTE_CONTEXT);
20062  
20063         /* Class 2 operation */
20064         append_operation(desc, op | state | OP_ALG_ENCRYPT);
20065  
20066         /*
20067          * Load from buf and/or src and write to req->result or state->context
20068 +        * Calculate remaining bytes to read
20069          */
20070 -       ahash_append_load_str(desc, digestsize);
20071 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20072 +       /* Read remaining bytes */
20073 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20074 +                            FIFOLD_TYPE_MSG | KEY_VLF);
20075 +       /* Store class2 context bytes */
20076 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20077 +                        LDST_SRCDST_BYTE_CONTEXT);
20078  }
20079  
20080  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20081 @@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
20082         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20083         int digestsize = crypto_ahash_digestsize(ahash);
20084         struct device *jrdev = ctx->jrdev;
20085 -       u32 have_key = 0;
20086         u32 *desc;
20087  
20088 -       if (ctx->split_key_len)
20089 -               have_key = OP_ALG_AAI_HMAC_PRECOMP;
20090 -
20091         /* ahash_update shared descriptor */
20092         desc = ctx->sh_desc_update;
20093 -
20094 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
20095 -
20096 -       /* Import context from software */
20097 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20098 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
20099 -
20100 -       /* Class 2 operation */
20101 -       append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
20102 -                        OP_ALG_ENCRYPT);
20103 -
20104 -       /* Load data and write to result or context */
20105 -       ahash_append_load_str(desc, ctx->ctx_len);
20106 -
20107 -       ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20108 -                                                DMA_TO_DEVICE);
20109 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
20110 -               dev_err(jrdev, "unable to map shared descriptor\n");
20111 -               return -ENOMEM;
20112 -       }
20113 +       ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
20114 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
20115 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20116  #ifdef DEBUG
20117         print_hex_dump(KERN_ERR,
20118                        "ahash update shdesc@"__stringify(__LINE__)": ",
20119 @@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
20120  
20121         /* ahash_update_first shared descriptor */
20122         desc = ctx->sh_desc_update_first;
20123 -
20124 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
20125 -                         ctx->ctx_len, ctx);
20126 -
20127 -       ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
20128 -                                                      desc_bytes(desc),
20129 -                                                      DMA_TO_DEVICE);
20130 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
20131 -               dev_err(jrdev, "unable to map shared descriptor\n");
20132 -               return -ENOMEM;
20133 -       }
20134 +       ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
20135 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
20136 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20137  #ifdef DEBUG
20138         print_hex_dump(KERN_ERR,
20139                        "ahash update first shdesc@"__stringify(__LINE__)": ",
20140 @@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
20141  
20142         /* ahash_final shared descriptor */
20143         desc = ctx->sh_desc_fin;
20144 -
20145 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20146 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
20147 -
20148 -       ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20149 -                                             DMA_TO_DEVICE);
20150 -       if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
20151 -               dev_err(jrdev, "unable to map shared descriptor\n");
20152 -               return -ENOMEM;
20153 -       }
20154 +       ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
20155 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
20156 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20157  #ifdef DEBUG
20158         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
20159                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
20160                        desc_bytes(desc), 1);
20161  #endif
20162  
20163 -       /* ahash_finup shared descriptor */
20164 -       desc = ctx->sh_desc_finup;
20165 -
20166 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20167 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
20168 -
20169 -       ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20170 -                                               DMA_TO_DEVICE);
20171 -       if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
20172 -               dev_err(jrdev, "unable to map shared descriptor\n");
20173 -               return -ENOMEM;
20174 -       }
20175 -#ifdef DEBUG
20176 -       print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
20177 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
20178 -                      desc_bytes(desc), 1);
20179 -#endif
20180 -
20181         /* ahash_digest shared descriptor */
20182         desc = ctx->sh_desc_digest;
20183 -
20184 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
20185 -                         digestsize, ctx);
20186 -
20187 -       ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
20188 -                                                desc_bytes(desc),
20189 -                                                DMA_TO_DEVICE);
20190 -       if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
20191 -               dev_err(jrdev, "unable to map shared descriptor\n");
20192 -               return -ENOMEM;
20193 -       }
20194 +       ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
20195 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
20196 +                                  desc_bytes(desc), DMA_TO_DEVICE);
20197  #ifdef DEBUG
20198         print_hex_dump(KERN_ERR,
20199                        "ahash digest shdesc@"__stringify(__LINE__)": ",
20200 @@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
20201         return 0;
20202  }
20203  
20204 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20205 -                             u32 keylen)
20206 -{
20207 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
20208 -                              ctx->split_key_pad_len, key_in, keylen,
20209 -                              ctx->alg_op);
20210 -}
20211 -
20212  /* Digest hash size if it is too large */
20213  static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20214                            u32 *keylen, u8 *key_out, u32 digestsize)
20215 @@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
20216         }
20217  
20218         /* Job descriptor to perform unkeyed hash on key_in */
20219 -       append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
20220 +       append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
20221                          OP_ALG_AS_INITFINAL);
20222         append_seq_in_ptr(desc, src_dma, *keylen, 0);
20223         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
20224 @@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
20225  static int ahash_setkey(struct crypto_ahash *ahash,
20226                         const u8 *key, unsigned int keylen)
20227  {
20228 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
20229 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
20230         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20231 -       struct device *jrdev = ctx->jrdev;
20232         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
20233         int digestsize = crypto_ahash_digestsize(ahash);
20234         int ret;
20235 @@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
20236                 key = hashed_key;
20237         }
20238  
20239 -       /* Pick class 2 key length from algorithm submask */
20240 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20241 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
20242 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
20243 -
20244 -#ifdef DEBUG
20245 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
20246 -              ctx->split_key_len, ctx->split_key_pad_len);
20247 -       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
20248 -                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
20249 -#endif
20250 -
20251 -       ret = gen_split_hash_key(ctx, key, keylen);
20252 +       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
20253 +                           CAAM_MAX_HASH_KEY_SIZE);
20254         if (ret)
20255                 goto bad_free_key;
20256  
20257 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
20258 -                                     DMA_TO_DEVICE);
20259 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
20260 -               dev_err(jrdev, "unable to map key i/o memory\n");
20261 -               ret = -ENOMEM;
20262 -               goto error_free_key;
20263 -       }
20264  #ifdef DEBUG
20265         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
20266                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
20267 -                      ctx->split_key_pad_len, 1);
20268 +                      ctx->adata.keylen_pad, 1);
20269  #endif
20270  
20271 -       ret = ahash_set_sh_desc(ahash);
20272 -       if (ret) {
20273 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
20274 -                                DMA_TO_DEVICE);
20275 -       }
20276 - error_free_key:
20277         kfree(hashed_key);
20278 -       return ret;
20279 +       return ahash_set_sh_desc(ahash);
20280   bad_free_key:
20281         kfree(hashed_key);
20282         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
20283 @@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
20284                         struct ahash_edesc *edesc,
20285                         struct ahash_request *req, int dst_len)
20286  {
20287 +       struct caam_hash_state *state = ahash_request_ctx(req);
20288 +
20289         if (edesc->src_nents)
20290                 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
20291         if (edesc->dst_dma)
20292 @@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
20293         if (edesc->sec4_sg_bytes)
20294                 dma_unmap_single(dev, edesc->sec4_sg_dma,
20295                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
20296 +
20297 +       if (state->buf_dma) {
20298 +               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
20299 +                                DMA_TO_DEVICE);
20300 +               state->buf_dma = 0;
20301 +       }
20302  }
20303  
20304  static inline void ahash_unmap_ctx(struct device *dev,
20305 @@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
20306         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20307  #endif
20308  
20309 -       edesc = (struct ahash_edesc *)((char *)desc -
20310 -                offsetof(struct ahash_edesc, hw_desc));
20311 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20312         if (err)
20313                 caam_jr_strstatus(jrdev, err);
20314  
20315 @@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
20316         struct ahash_edesc *edesc;
20317         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20318         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20319 -#ifdef DEBUG
20320         struct caam_hash_state *state = ahash_request_ctx(req);
20321 +#ifdef DEBUG
20322         int digestsize = crypto_ahash_digestsize(ahash);
20323  
20324         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20325  #endif
20326  
20327 -       edesc = (struct ahash_edesc *)((char *)desc -
20328 -                offsetof(struct ahash_edesc, hw_desc));
20329 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20330         if (err)
20331                 caam_jr_strstatus(jrdev, err);
20332  
20333         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
20334 +       switch_buf(state);
20335         kfree(edesc);
20336  
20337  #ifdef DEBUG
20338 @@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
20339         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20340  #endif
20341  
20342 -       edesc = (struct ahash_edesc *)((char *)desc -
20343 -                offsetof(struct ahash_edesc, hw_desc));
20344 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20345         if (err)
20346                 caam_jr_strstatus(jrdev, err);
20347  
20348 @@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
20349         struct ahash_edesc *edesc;
20350         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20351         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20352 -#ifdef DEBUG
20353         struct caam_hash_state *state = ahash_request_ctx(req);
20354 +#ifdef DEBUG
20355         int digestsize = crypto_ahash_digestsize(ahash);
20356  
20357         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20358  #endif
20359  
20360 -       edesc = (struct ahash_edesc *)((char *)desc -
20361 -                offsetof(struct ahash_edesc, hw_desc));
20362 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20363         if (err)
20364                 caam_jr_strstatus(jrdev, err);
20365  
20366         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
20367 +       switch_buf(state);
20368         kfree(edesc);
20369  
20370  #ifdef DEBUG
20371 @@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
20372         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20373         struct caam_hash_state *state = ahash_request_ctx(req);
20374         struct device *jrdev = ctx->jrdev;
20375 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20376 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20377 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20378 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20379 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20380 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
20381 -                          &state->buflen_1, last_buflen;
20382 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20383 +                      GFP_KERNEL : GFP_ATOMIC;
20384 +       u8 *buf = current_buf(state);
20385 +       int *buflen = current_buflen(state);
20386 +       u8 *next_buf = alt_buf(state);
20387 +       int *next_buflen = alt_buflen(state), last_buflen;
20388         int in_len = *buflen + req->nbytes, to_hash;
20389         u32 *desc;
20390         int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
20391 @@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
20392                 if (ret)
20393                         goto unmap_ctx;
20394  
20395 -               state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
20396 -                                                       edesc->sec4_sg + 1,
20397 -                                                       buf, state->buf_dma,
20398 -                                                       *buflen, last_buflen);
20399 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20400 +               if (ret)
20401 +                       goto unmap_ctx;
20402  
20403                 if (mapped_nents) {
20404                         sg_to_sec4_sg_last(req->src, mapped_nents,
20405 @@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
20406                                                          to_hash - *buflen,
20407                                                          *next_buflen, 0);
20408                 } else {
20409 -                       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20410 -                               cpu_to_caam32(SEC4_SG_LEN_FIN);
20411 +                       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
20412 +                                           1);
20413                 }
20414  
20415 -               state->current_buf = !state->current_buf;
20416 -
20417                 desc = edesc->hw_desc;
20418  
20419                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20420 @@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
20421         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20422         struct caam_hash_state *state = ahash_request_ctx(req);
20423         struct device *jrdev = ctx->jrdev;
20424 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20425 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20426 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20427 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20428 -       int last_buflen = state->current_buf ? state->buflen_0 :
20429 -                         state->buflen_1;
20430 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20431 +                      GFP_KERNEL : GFP_ATOMIC;
20432 +       int buflen = *current_buflen(state);
20433         u32 *desc;
20434         int sec4_sg_bytes, sec4_sg_src_index;
20435         int digestsize = crypto_ahash_digestsize(ahash);
20436 @@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
20437         if (ret)
20438                 goto unmap_ctx;
20439  
20440 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20441 -                                               buf, state->buf_dma, buflen,
20442 -                                               last_buflen);
20443 -       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20444 -               cpu_to_caam32(SEC4_SG_LEN_FIN);
20445 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20446 +       if (ret)
20447 +               goto unmap_ctx;
20448 +
20449 +       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
20450  
20451         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20452                                             sec4_sg_bytes, DMA_TO_DEVICE);
20453 @@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
20454         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20455         struct caam_hash_state *state = ahash_request_ctx(req);
20456         struct device *jrdev = ctx->jrdev;
20457 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20458 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20459 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20460 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20461 -       int last_buflen = state->current_buf ? state->buflen_0 :
20462 -                         state->buflen_1;
20463 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20464 +                      GFP_KERNEL : GFP_ATOMIC;
20465 +       int buflen = *current_buflen(state);
20466         u32 *desc;
20467         int sec4_sg_src_index;
20468         int src_nents, mapped_nents;
20469 @@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
20470  
20471         /* allocate space for base edesc and hw desc commands, link tables */
20472         edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
20473 -                                 ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
20474 +                                 ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
20475                                   flags);
20476         if (!edesc) {
20477                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
20478 @@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
20479         if (ret)
20480                 goto unmap_ctx;
20481  
20482 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20483 -                                               buf, state->buf_dma, buflen,
20484 -                                               last_buflen);
20485 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20486 +       if (ret)
20487 +               goto unmap_ctx;
20488  
20489         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
20490                                   sec4_sg_src_index, ctx->ctx_len + buflen,
20491 @@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
20492  {
20493         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20494         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20495 +       struct caam_hash_state *state = ahash_request_ctx(req);
20496         struct device *jrdev = ctx->jrdev;
20497 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20498 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20499 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20500 +                      GFP_KERNEL : GFP_ATOMIC;
20501         u32 *desc;
20502         int digestsize = crypto_ahash_digestsize(ahash);
20503         int src_nents, mapped_nents;
20504         struct ahash_edesc *edesc;
20505         int ret;
20506  
20507 +       state->buf_dma = 0;
20508 +
20509         src_nents = sg_nents_for_len(req->src, req->nbytes);
20510         if (src_nents < 0) {
20511                 dev_err(jrdev, "Invalid number of src SG.\n");
20512 @@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
20513         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20514         struct caam_hash_state *state = ahash_request_ctx(req);
20515         struct device *jrdev = ctx->jrdev;
20516 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20517 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20518 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20519 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20520 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20521 +                      GFP_KERNEL : GFP_ATOMIC;
20522 +       u8 *buf = current_buf(state);
20523 +       int buflen = *current_buflen(state);
20524         u32 *desc;
20525         int digestsize = crypto_ahash_digestsize(ahash);
20526         struct ahash_edesc *edesc;
20527 @@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
20528         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20529         struct caam_hash_state *state = ahash_request_ctx(req);
20530         struct device *jrdev = ctx->jrdev;
20531 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20532 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20533 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20534 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20535 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20536 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
20537 -                          &state->buflen_1;
20538 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20539 +                      GFP_KERNEL : GFP_ATOMIC;
20540 +       u8 *buf = current_buf(state);
20541 +       int *buflen = current_buflen(state);
20542 +       u8 *next_buf = alt_buf(state);
20543 +       int *next_buflen = alt_buflen(state);
20544         int in_len = *buflen + req->nbytes, to_hash;
20545         int sec4_sg_bytes, src_nents, mapped_nents;
20546         struct ahash_edesc *edesc;
20547 @@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
20548                 edesc->sec4_sg_bytes = sec4_sg_bytes;
20549                 edesc->dst_dma = 0;
20550  
20551 -               state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
20552 -                                                   buf, *buflen);
20553 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20554 +               if (ret)
20555 +                       goto unmap_ctx;
20556 +
20557                 sg_to_sec4_sg_last(req->src, mapped_nents,
20558                                    edesc->sec4_sg + 1, 0);
20559  
20560 @@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
20561                                                  *next_buflen, 0);
20562                 }
20563  
20564 -               state->current_buf = !state->current_buf;
20565 -
20566                 desc = edesc->hw_desc;
20567  
20568                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20569 @@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
20570         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20571         struct caam_hash_state *state = ahash_request_ctx(req);
20572         struct device *jrdev = ctx->jrdev;
20573 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20574 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20575 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20576 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20577 -       int last_buflen = state->current_buf ? state->buflen_0 :
20578 -                         state->buflen_1;
20579 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20580 +                      GFP_KERNEL : GFP_ATOMIC;
20581 +       int buflen = *current_buflen(state);
20582         u32 *desc;
20583         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
20584         int digestsize = crypto_ahash_digestsize(ahash);
20585 @@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
20586         edesc->src_nents = src_nents;
20587         edesc->sec4_sg_bytes = sec4_sg_bytes;
20588  
20589 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
20590 -                                               state->buf_dma, buflen,
20591 -                                               last_buflen);
20592 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20593 +       if (ret)
20594 +               goto unmap;
20595  
20596         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
20597                                   req->nbytes);
20598 @@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
20599         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20600         struct caam_hash_state *state = ahash_request_ctx(req);
20601         struct device *jrdev = ctx->jrdev;
20602 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20603 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20604 -       u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
20605 -       int *next_buflen = state->current_buf ?
20606 -               &state->buflen_1 : &state->buflen_0;
20607 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20608 +                      GFP_KERNEL : GFP_ATOMIC;
20609 +       u8 *next_buf = alt_buf(state);
20610 +       int *next_buflen = alt_buflen(state);
20611         int to_hash;
20612         u32 *desc;
20613         int src_nents, mapped_nents;
20614 @@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
20615                 state->final = ahash_final_no_ctx;
20616                 scatterwalk_map_and_copy(next_buf, req->src, 0,
20617                                          req->nbytes, 0);
20618 +               switch_buf(state);
20619         }
20620  #ifdef DEBUG
20621         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
20622 @@ -1688,7 +1561,6 @@ struct caam_hash_template {
20623         unsigned int blocksize;
20624         struct ahash_alg template_ahash;
20625         u32 alg_type;
20626 -       u32 alg_op;
20627  };
20628  
20629  /* ahash descriptors */
20630 @@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
20631                         },
20632                 },
20633                 .alg_type = OP_ALG_ALGSEL_SHA1,
20634 -               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
20635         }, {
20636                 .name = "sha224",
20637                 .driver_name = "sha224-caam",
20638 @@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
20639                         },
20640                 },
20641                 .alg_type = OP_ALG_ALGSEL_SHA224,
20642 -               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
20643         }, {
20644                 .name = "sha256",
20645                 .driver_name = "sha256-caam",
20646 @@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
20647                         },
20648                 },
20649                 .alg_type = OP_ALG_ALGSEL_SHA256,
20650 -               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
20651         }, {
20652                 .name = "sha384",
20653                 .driver_name = "sha384-caam",
20654 @@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
20655                         },
20656                 },
20657                 .alg_type = OP_ALG_ALGSEL_SHA384,
20658 -               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
20659         }, {
20660                 .name = "sha512",
20661                 .driver_name = "sha512-caam",
20662 @@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
20663                         },
20664                 },
20665                 .alg_type = OP_ALG_ALGSEL_SHA512,
20666 -               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20667         }, {
20668                 .name = "md5",
20669                 .driver_name = "md5-caam",
20670 @@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
20671                         },
20672                 },
20673                 .alg_type = OP_ALG_ALGSEL_MD5,
20674 -               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
20675         },
20676  };
20677  
20678  struct caam_hash_alg {
20679         struct list_head entry;
20680         int alg_type;
20681 -       int alg_op;
20682         struct ahash_alg ahash_alg;
20683  };
20684  
20685 @@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
20686                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20687                                          HASH_MSG_LEN + 64,
20688                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20689 +       dma_addr_t dma_addr;
20690  
20691         /*
20692          * Get a Job ring from Job Ring driver to ensure in-order
20693 @@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
20694                 pr_err("Job Ring Device allocation for transform failed\n");
20695                 return PTR_ERR(ctx->jrdev);
20696         }
20697 +
20698 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
20699 +                                       offsetof(struct caam_hash_ctx,
20700 +                                                sh_desc_update_dma),
20701 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20702 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
20703 +               dev_err(ctx->jrdev, "unable to map shared descriptors\n");
20704 +               caam_jr_free(ctx->jrdev);
20705 +               return -ENOMEM;
20706 +       }
20707 +
20708 +       ctx->sh_desc_update_dma = dma_addr;
20709 +       ctx->sh_desc_update_first_dma = dma_addr +
20710 +                                       offsetof(struct caam_hash_ctx,
20711 +                                                sh_desc_update_first);
20712 +       ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
20713 +                                                  sh_desc_fin);
20714 +       ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
20715 +                                                     sh_desc_digest);
20716 +
20717         /* copy descriptor header template value */
20718 -       ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20719 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
20720 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20721  
20722 -       ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20723 +       ctx->ctx_len = runninglen[(ctx->adata.algtype &
20724 +                                  OP_ALG_ALGSEL_SUBMASK) >>
20725                                   OP_ALG_ALGSEL_SHIFT];
20726  
20727         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20728 @@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
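The cra_init hunk above swaps four independent descriptor mappings for a single one: the four sh_desc_* arrays sit back-to-back at the top of struct caam_hash_ctx, so one dma_map_single_attrs() covering everything up to sh_desc_update_dma maps them all, and each per-descriptor DMA address is the base plus an offsetof(). DMA_ATTR_SKIP_CPU_SYNC defers synchronization to ahash_set_sh_desc(), which calls dma_sync_single_for_device() only for descriptors it has just (re)written. The shape of the pattern, as a standalone sketch with hypothetical names:

	#define DESC_WORDS 64

	struct my_ctx {
		u32 desc_a[DESC_WORDS];	/* contiguous region ...    */
		u32 desc_b[DESC_WORDS];	/* ... mapped in one call   */
		dma_addr_t desc_a_dma;	/* first field after region */
		dma_addr_t desc_b_dma;
	};

	static int my_ctx_map(struct device *dev, struct my_ctx *ctx)
	{
		dma_addr_t dma;

		dma = dma_map_single_attrs(dev, ctx->desc_a,
					   offsetof(struct my_ctx, desc_a_dma),
					   DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		ctx->desc_a_dma = dma;
		ctx->desc_b_dma = dma + offsetof(struct my_ctx, desc_b);

		/*
		 * After rewriting desc_b on the CPU, push it to the device:
		 * dma_sync_single_for_device(dev, ctx->desc_b_dma, len,
		 *			      DMA_TO_DEVICE);
		 */
		return 0;
	}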
20729  {
20730         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20731  
20732 -       if (ctx->sh_desc_update_dma &&
20733 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
20734 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
20735 -                                desc_bytes(ctx->sh_desc_update),
20736 -                                DMA_TO_DEVICE);
20737 -       if (ctx->sh_desc_update_first_dma &&
20738 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
20739 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
20740 -                                desc_bytes(ctx->sh_desc_update_first),
20741 -                                DMA_TO_DEVICE);
20742 -       if (ctx->sh_desc_fin_dma &&
20743 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
20744 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
20745 -                                desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
20746 -       if (ctx->sh_desc_digest_dma &&
20747 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
20748 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
20749 -                                desc_bytes(ctx->sh_desc_digest),
20750 -                                DMA_TO_DEVICE);
20751 -       if (ctx->sh_desc_finup_dma &&
20752 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
20753 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
20754 -                                desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
20755 -
20756 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
20757 +                              offsetof(struct caam_hash_ctx,
20758 +                                       sh_desc_update_dma),
20759 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20760         caam_jr_free(ctx->jrdev);
20761  }
20762  
20763 @@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
20764         alg->cra_type = &crypto_ahash_type;
20765  
20766         t_alg->alg_type = template->alg_type;
20767 -       t_alg->alg_op = template->alg_op;
20768  
20769         return t_alg;
20770  }
20771 --- a/drivers/crypto/caam/caampkc.c
20772 +++ b/drivers/crypto/caam/caampkc.c
20773 @@ -18,6 +18,10 @@
20774  #define DESC_RSA_PUB_LEN       (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
20775  #define DESC_RSA_PRIV_F1_LEN   (2 * CAAM_CMD_SZ + \
20776                                  sizeof(struct rsa_priv_f1_pdb))
20777 +#define DESC_RSA_PRIV_F2_LEN   (2 * CAAM_CMD_SZ + \
20778 +                                sizeof(struct rsa_priv_f2_pdb))
20779 +#define DESC_RSA_PRIV_F3_LEN   (2 * CAAM_CMD_SZ + \
20780 +                                sizeof(struct rsa_priv_f3_pdb))
20781  
20782  static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
20783                          struct akcipher_request *req)
20784 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
20785         dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20786  }
20787  
20788 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
20789 +                             struct akcipher_request *req)
20790 +{
20791 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20792 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20793 +       struct caam_rsa_key *key = &ctx->key;
20794 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20795 +       size_t p_sz = key->p_sz;
20796 +       size_t q_sz = key->q_sz;
20797 +
20798 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20799 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20800 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20801 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20802 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20803 +}
20804 +
20805 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
20806 +                             struct akcipher_request *req)
20807 +{
20808 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20809 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20810 +       struct caam_rsa_key *key = &ctx->key;
20811 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20812 +       size_t p_sz = key->p_sz;
20813 +       size_t q_sz = key->q_sz;
20814 +
20815 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20816 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20817 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
20818 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
20819 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
20820 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20821 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20822 +}
20823 +
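The two unmap helpers mirror the two RSA private-key representations the hardware accepts in addition to form 1 (n, d): form 2 supplies (p, q, d) plus two temporaries, while form 3 supplies the CRT quintuple, which is why rsa_priv_f3_unmap releases five key components. For reference, the standard RSA-CRT recombination that the form 3 inputs enable is shown below; the exact internal sequence used by the SEC block is not spelled out in this patch.

	dp = d mod (p - 1)
	dq = d mod (q - 1)
	c  = q^(-1) mod p	/* mapped via pdb->c_dma (key->qinv) */

	m1 = ciphertext^dp mod p
	m2 = ciphertext^dq mod q
	h  = c * (m1 - m2) mod p
	m  = m2 + h * q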
20824  /* RSA Job Completion handler */
20825  static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
20826  {
20827 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
20828         akcipher_request_complete(req, err);
20829  }
20830  
20831 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
20832 +                            void *context)
20833 +{
20834 +       struct akcipher_request *req = context;
20835 +       struct rsa_edesc *edesc;
20836 +
20837 +       if (err)
20838 +               caam_jr_strstatus(dev, err);
20839 +
20840 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20841 +
20842 +       rsa_priv_f2_unmap(dev, edesc, req);
20843 +       rsa_io_unmap(dev, edesc, req);
20844 +       kfree(edesc);
20845 +
20846 +       akcipher_request_complete(req, err);
20847 +}
20848 +
20849 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
20850 +                            void *context)
20851 +{
20852 +       struct akcipher_request *req = context;
20853 +       struct rsa_edesc *edesc;
20854 +
20855 +       if (err)
20856 +               caam_jr_strstatus(dev, err);
20857 +
20858 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20859 +
20860 +       rsa_priv_f3_unmap(dev, edesc, req);
20861 +       rsa_io_unmap(dev, edesc, req);
20862 +       kfree(edesc);
20863 +
20864 +       akcipher_request_complete(req, err);
20865 +}
20866 +
20867  static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20868                                          size_t desclen)
20869  {
20870 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
20871         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20872         struct device *dev = ctx->dev;
20873         struct rsa_edesc *edesc;
20874 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20875 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20876 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20877 +                      GFP_KERNEL : GFP_ATOMIC;
20878         int sgc;
20879         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
20880         int src_nents, dst_nents;
20881 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
20882         return 0;
20883  }
20884  
20885 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
20886 +                              struct rsa_edesc *edesc)
20887 +{
20888 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20889 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20890 +       struct caam_rsa_key *key = &ctx->key;
20891 +       struct device *dev = ctx->dev;
20892 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20893 +       int sec4_sg_index = 0;
20894 +       size_t p_sz = key->p_sz;
20895 +       size_t q_sz = key->q_sz;
20896 +
20897 +       pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
20898 +       if (dma_mapping_error(dev, pdb->d_dma)) {
20899 +               dev_err(dev, "Unable to map RSA private exponent memory\n");
20900 +               return -ENOMEM;
20901 +       }
20902 +
20903 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20904 +       if (dma_mapping_error(dev, pdb->p_dma)) {
20905 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
20906 +               goto unmap_d;
20907 +       }
20908 +
20909 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20910 +       if (dma_mapping_error(dev, pdb->q_dma)) {
20911 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
20912 +               goto unmap_p;
20913 +       }
20914 +
20915 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
20916 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
20917 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
20918 +               goto unmap_q;
20919 +       }
20920 +
20921 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
20922 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
20923 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
20924 +               goto unmap_tmp1;
20925 +       }
20926 +
20927 +       if (edesc->src_nents > 1) {
20928 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
20929 +               pdb->g_dma = edesc->sec4_sg_dma;
20930 +               sec4_sg_index += edesc->src_nents;
20931 +       } else {
20932 +               pdb->g_dma = sg_dma_address(req->src);
20933 +       }
20934 +
20935 +       if (edesc->dst_nents > 1) {
20936 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
20937 +               pdb->f_dma = edesc->sec4_sg_dma +
20938 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
20939 +       } else {
20940 +               pdb->f_dma = sg_dma_address(req->dst);
20941 +       }
20942 +
20943 +       pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
20944 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
20945 +
20946 +       return 0;
20947 +
20948 +unmap_tmp1:
20949 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20950 +unmap_q:
20951 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20952 +unmap_p:
20953 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20954 +unmap_d:
20955 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20956 +
20957 +       return -ENOMEM;
20958 +}
20959 +
20960 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
20961 +                              struct rsa_edesc *edesc)
20962 +{
20963 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20964 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20965 +       struct caam_rsa_key *key = &ctx->key;
20966 +       struct device *dev = ctx->dev;
20967 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20968 +       int sec4_sg_index = 0;
20969 +       size_t p_sz = key->p_sz;
20970 +       size_t q_sz = key->q_sz;
20971 +
20972 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20973 +       if (dma_mapping_error(dev, pdb->p_dma)) {
20974 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
20975 +               return -ENOMEM;
20976 +       }
20977 +
20978 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20979 +       if (dma_mapping_error(dev, pdb->q_dma)) {
20980 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
20981 +               goto unmap_p;
20982 +       }
20983 +
20984 +       pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
20985 +       if (dma_mapping_error(dev, pdb->dp_dma)) {
20986 +               dev_err(dev, "Unable to map RSA exponent dp memory\n");
20987 +               goto unmap_q;
20988 +       }
20989 +
20990 +       pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
20991 +       if (dma_mapping_error(dev, pdb->dq_dma)) {
20992 +               dev_err(dev, "Unable to map RSA exponent dq memory\n");
20993 +               goto unmap_dp;
20994 +       }
20995 +
20996 +       pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
20997 +       if (dma_mapping_error(dev, pdb->c_dma)) {
20998 +               dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
20999 +               goto unmap_dq;
21000 +       }
21001 +
21002 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
21003 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
21004 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
21005 +               goto unmap_qinv;
21006 +       }
21007 +
21008 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
21009 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
21010 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
21011 +               goto unmap_tmp1;
21012 +       }
21013 +
21014 +       if (edesc->src_nents > 1) {
21015 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
21016 +               pdb->g_dma = edesc->sec4_sg_dma;
21017 +               sec4_sg_index += edesc->src_nents;
21018 +       } else {
21019 +               pdb->g_dma = sg_dma_address(req->src);
21020 +       }
21021 +
21022 +       if (edesc->dst_nents > 1) {
21023 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
21024 +               pdb->f_dma = edesc->sec4_sg_dma +
21025 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
21026 +       } else {
21027 +               pdb->f_dma = sg_dma_address(req->dst);
21028 +       }
21029 +
21030 +       pdb->sgf |= key->n_sz;
21031 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
21032 +
21033 +       return 0;
21034 +
21035 +unmap_tmp1:
21036 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21037 +unmap_qinv:
21038 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
21039 +unmap_dq:
21040 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
21041 +unmap_dp:
21042 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
21043 +unmap_q:
21044 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21045 +unmap_p:
21046 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21047 +
21048 +       return -ENOMEM;
21049 +}
21050 +
21051  static int caam_rsa_enc(struct akcipher_request *req)
21052  {
21053         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21054 @@ -301,24 +543,14 @@ init_fail:
21055         return ret;
21056  }
21057  
21058 -static int caam_rsa_dec(struct akcipher_request *req)
21059 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
21060  {
21061         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21062         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21063 -       struct caam_rsa_key *key = &ctx->key;
21064         struct device *jrdev = ctx->dev;
21065         struct rsa_edesc *edesc;
21066         int ret;
21067  
21068 -       if (unlikely(!key->n || !key->d))
21069 -               return -EINVAL;
21070 -
21071 -       if (req->dst_len < key->n_sz) {
21072 -               req->dst_len = key->n_sz;
21073 -               dev_err(jrdev, "Output buffer length less than parameter n\n");
21074 -               return -EOVERFLOW;
21075 -       }
21076 -
21077         /* Allocate extended descriptor */
21078         edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
21079         if (IS_ERR(edesc))
21080 @@ -344,17 +576,147 @@ init_fail:
21081         return ret;
21082  }
21083  
21084 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
21085 +{
21086 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21087 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21088 +       struct device *jrdev = ctx->dev;
21089 +       struct rsa_edesc *edesc;
21090 +       int ret;
21091 +
21092 +       /* Allocate extended descriptor */
21093 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
21094 +       if (IS_ERR(edesc))
21095 +               return PTR_ERR(edesc);
21096 +
21097 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
21098 +       ret = set_rsa_priv_f2_pdb(req, edesc);
21099 +       if (ret)
21100 +               goto init_fail;
21101 +
21102 +       /* Initialize Job Descriptor */
21103 +       init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
21104 +
21105 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
21106 +       if (!ret)
21107 +               return -EINPROGRESS;
21108 +
21109 +       rsa_priv_f2_unmap(jrdev, edesc, req);
21110 +
21111 +init_fail:
21112 +       rsa_io_unmap(jrdev, edesc, req);
21113 +       kfree(edesc);
21114 +       return ret;
21115 +}
21116 +
21117 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
21118 +{
21119 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21120 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21121 +       struct device *jrdev = ctx->dev;
21122 +       struct rsa_edesc *edesc;
21123 +       int ret;
21124 +
21125 +       /* Allocate extended descriptor */
21126 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
21127 +       if (IS_ERR(edesc))
21128 +               return PTR_ERR(edesc);
21129 +
21130 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
21131 +       ret = set_rsa_priv_f3_pdb(req, edesc);
21132 +       if (ret)
21133 +               goto init_fail;
21134 +
21135 +       /* Initialize Job Descriptor */
21136 +       init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
21137 +
21138 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
21139 +       if (!ret)
21140 +               return -EINPROGRESS;
21141 +
21142 +       rsa_priv_f3_unmap(jrdev, edesc, req);
21143 +
21144 +init_fail:
21145 +       rsa_io_unmap(jrdev, edesc, req);
21146 +       kfree(edesc);
21147 +       return ret;
21148 +}
21149 +
21150 +static int caam_rsa_dec(struct akcipher_request *req)
21151 +{
21152 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21153 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21154 +       struct caam_rsa_key *key = &ctx->key;
21155 +       int ret;
21156 +
21157 +       if (unlikely(!key->n || !key->d))
21158 +               return -EINVAL;
21159 +
21160 +       if (req->dst_len < key->n_sz) {
21161 +               req->dst_len = key->n_sz;
21162 +               dev_err(ctx->dev, "Output buffer length less than parameter n\n");
21163 +               return -EOVERFLOW;
21164 +       }
21165 +
21166 +       if (key->priv_form == FORM3)
21167 +               ret = caam_rsa_dec_priv_f3(req);
21168 +       else if (key->priv_form == FORM2)
21169 +               ret = caam_rsa_dec_priv_f2(req);
21170 +       else
21171 +               ret = caam_rsa_dec_priv_f1(req);
21172 +
21173 +       return ret;
21174 +}
21175 +
21176  static void caam_rsa_free_key(struct caam_rsa_key *key)
21177  {
21178         kzfree(key->d);
21179 +       kzfree(key->p);
21180 +       kzfree(key->q);
21181 +       kzfree(key->dp);
21182 +       kzfree(key->dq);
21183 +       kzfree(key->qinv);
21184 +       kzfree(key->tmp1);
21185 +       kzfree(key->tmp2);
21186         kfree(key->e);
21187         kfree(key->n);
21188 -       key->d = NULL;
21189 -       key->e = NULL;
21190 -       key->n = NULL;
21191 -       key->d_sz = 0;
21192 -       key->e_sz = 0;
21193 -       key->n_sz = 0;
21194 +       memset(key, 0, sizeof(*key));
21195 +}
21196 +
21197 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
21198 +{
21199 +       while (!**ptr && *nbytes) {
21200 +               (*ptr)++;
21201 +               (*nbytes)--;
21202 +       }
21203 +}
21204 +
21205 +/**
21206 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
21207 + * dP, dQ and qInv may decode to a length smaller than that of the
21208 + * corresponding prime factor p or q, since BER encoding requires that the
21209 + * minimum number of bytes be used to encode an integer. The decoded values
21210 + * therefore have to be zero-padded back to the prime factor's length.
21211 + *
21212 + * @ptr   : pointer to {dP, dQ, qInv} CRT member
21213 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
21214 + * @dstlen: length in bytes of corresponding p or q prime factor
21215 + */
21216 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
21217 +{
21218 +       u8 *dst;
21219 +
21220 +       caam_rsa_drop_leading_zeros(&ptr, &nbytes);
21221 +       if (!nbytes)
21222 +               return NULL;
21223 +
21224 +       dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
21225 +       if (!dst)
21226 +               return NULL;
21227 +
21228 +       memcpy(dst + (dstlen - nbytes), ptr, nbytes);
21229 +
21230 +       return dst;
21231  }
21232  
21233  /**
21234 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
21235  {
21236         u8 *val;
21237  
21238 -       while (!*buf && *nbytes) {
21239 -               buf++;
21240 -               (*nbytes)--;
21241 -       }
21242 +       caam_rsa_drop_leading_zeros(&buf, nbytes);
21243 +       if (!*nbytes)
21244 +               return NULL;
21245  
21246         val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
21247         if (!val)
21248 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
21249                                 unsigned int keylen)
21250  {
21251         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21252 -       struct rsa_key raw_key = {0};
21253 +       struct rsa_key raw_key = {NULL};
21254         struct caam_rsa_key *rsa_key = &ctx->key;
21255         int ret;
21256  
21257 @@ -437,11 +798,69 @@ err:
21258         return -ENOMEM;
21259  }
21260  
21261 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
21262 +                                      struct rsa_key *raw_key)
21263 +{
21264 +       struct caam_rsa_key *rsa_key = &ctx->key;
21265 +       size_t p_sz = raw_key->p_sz;
21266 +       size_t q_sz = raw_key->q_sz;
21267 +
21268 +       rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
21269 +       if (!rsa_key->p)
21270 +               return;
21271 +       rsa_key->p_sz = p_sz;
21272 +
21273 +       rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
21274 +       if (!rsa_key->q)
21275 +               goto free_p;
21276 +       rsa_key->q_sz = q_sz;
21277 +
21278 +       rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
21279 +       if (!rsa_key->tmp1)
21280 +               goto free_q;
21281 +
21282 +       rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
21283 +       if (!rsa_key->tmp2)
21284 +               goto free_tmp1;
21285 +
21286 +       rsa_key->priv_form = FORM2;
21287 +
21288 +       rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
21289 +       if (!rsa_key->dp)
21290 +               goto free_tmp2;
21291 +
21292 +       rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
21293 +       if (!rsa_key->dq)
21294 +               goto free_dp;
21295 +
21296 +       rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
21297 +                                         q_sz);
21298 +       if (!rsa_key->qinv)
21299 +               goto free_dq;
21300 +
21301 +       rsa_key->priv_form = FORM3;
21302 +
21303 +       return;
21304 +
21305 +free_dq:
21306 +       kzfree(rsa_key->dq);
21307 +free_dp:
21308 +       kzfree(rsa_key->dp);
21309 +free_tmp2:
21310 +       kzfree(rsa_key->tmp2);
21311 +free_tmp1:
21312 +       kzfree(rsa_key->tmp1);
21313 +free_q:
21314 +       kzfree(rsa_key->q);
21315 +free_p:
21316 +       kzfree(rsa_key->p);
21317 +}
21318 +
21319  static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21320                                  unsigned int keylen)
21321  {
21322         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21323 -       struct rsa_key raw_key = {0};
21324 +       struct rsa_key raw_key = {NULL};
21325         struct caam_rsa_key *rsa_key = &ctx->key;
21326         int ret;
21327  
21328 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
21329         memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
21330         memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
21331  
21332 +       caam_rsa_set_priv_key_form(ctx, &raw_key);
21333 +
21334         return 0;
21335  
21336  err:
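A minimal userspace sketch of the normalization caam_read_rsa_crt() performs above: strip the BER leading zeros, then left-pad the value back to the prime factor's length. Illustrative only, not part of the patch; the helper name and test data are made up.

#include <stdio.h>
#include <string.h>

static void normalize_crt_member(const unsigned char *src, size_t src_len,
				 unsigned char *dst, size_t dst_len)
{
	/* drop leading zero octets left over from BER encoding */
	while (src_len && !*src) {
		src++;
		src_len--;
	}
	/* zero-fill, then right-align the significant bytes */
	memset(dst, 0, dst_len);
	memcpy(dst + (dst_len - src_len), src, src_len);
}

int main(void)
{
	const unsigned char dp[] = { 0x00, 0x00, 0x12, 0x34 };
	unsigned char padded[8];
	size_t i;

	normalize_crt_member(dp, sizeof(dp), padded, sizeof(padded));
	for (i = 0; i < sizeof(padded); i++)
		printf("%02x", padded[i]);
	printf("\n");	/* prints 0000000000001234 */
	return 0;
}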
21337 --- a/drivers/crypto/caam/caampkc.h
21338 +++ b/drivers/crypto/caam/caampkc.h
21339 @@ -13,21 +13,75 @@
21340  #include "pdb.h"
21341  
21342  /**
21343 + * caam_priv_key_form - CAAM RSA private key representation
21344 + * A CAAM RSA private key may take any one of three forms.
21345 + *
21346 + * 1. The first representation consists of the pair (n, d), where the
21347 + *    components have the following meanings:
21348 + *        n      the RSA modulus
21349 + *        d      the RSA private exponent
21350 + *
21351 + * 2. The second representation consists of the triplet (p, q, d), where the
21352 + *    components have the following meanings:
21353 + *        p      the first prime factor of the RSA modulus n
21354 + *        q      the second prime factor of the RSA modulus n
21355 + *        d      the RSA private exponent
21356 + *
21357 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
21358 + *    where the components have the following meanings:
21359 + *        p      the first prime factor of the RSA modulus n
21360 + *        q      the second prime factor of the RSA modulus n
21361 + *        dP     the first factor's CRT exponent
21362 + *        dQ     the second factor's CRT exponent
21363 + *        qInv   the (first) CRT coefficient
21364 + *
21365 + * The benefit of using the second or third key form is a lower computational
21366 + * cost for the decryption and signature operations.
21367 + */
21368 +enum caam_priv_key_form {
21369 +       FORM1,
21370 +       FORM2,
21371 +       FORM3
21372 +};
21373 +
21374 +/**
21375   * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
21376   * @n           : RSA modulus raw byte stream
21377   * @e           : RSA public exponent raw byte stream
21378   * @d           : RSA private exponent raw byte stream
21379 + * @p           : RSA prime factor p of RSA modulus n
21380 + * @q           : RSA prime factor q of RSA modulus n
21381 + * @dp          : RSA CRT exponent of p
21382 + * @dq          : RSA CRT exponent of q
21383 + * @qinv        : RSA CRT coefficient
21384 + * @tmp1        : CAAM uses this temporary buffer as an internal state buffer.
21385 + *                It is assumed to be as long as p.
21386 + * @tmp2        : CAAM uses this temporary buffer as an internal state buffer.
21387 + *                It is assumed to be as long as q.
21388   * @n_sz        : length in bytes of RSA modulus n
21389   * @e_sz        : length in bytes of RSA public exponent
21390   * @d_sz        : length in bytes of RSA private exponent
21391 + * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
21392 + * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
21393 + * @priv_form   : CAAM RSA private key representation
21394   */
21395  struct caam_rsa_key {
21396         u8 *n;
21397         u8 *e;
21398         u8 *d;
21399 +       u8 *p;
21400 +       u8 *q;
21401 +       u8 *dp;
21402 +       u8 *dq;
21403 +       u8 *qinv;
21404 +       u8 *tmp1;
21405 +       u8 *tmp2;
21406         size_t n_sz;
21407         size_t e_sz;
21408         size_t d_sz;
21409 +       size_t p_sz;
21410 +       size_t q_sz;
21411 +       enum caam_priv_key_form priv_form;
21412  };
21413  
21414  /**
21415 @@ -59,6 +113,8 @@ struct rsa_edesc {
21416         union {
21417                 struct rsa_pub_pdb pub;
21418                 struct rsa_priv_f1_pdb priv_f1;
21419 +               struct rsa_priv_f2_pdb priv_f2;
21420 +               struct rsa_priv_f3_pdb priv_f3;
21421         } pdb;
21422         u32 hw_desc[];
21423  };
21424 @@ -66,5 +122,7 @@ struct rsa_edesc {
21425  /* Descriptor construction primitives. */
21426  void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
21427  void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
21428 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
21429 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
21430  
21431  #endif
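The caampkc.h comment above is the whole story behind FORM1/FORM2/FORM3: the CRT forms trade extra key material for two half-size exponentiations instead of one full-size one. A toy, runnable illustration with word-sized numbers (real keys need multi-precision arithmetic; all values here are made up):

#include <stdio.h>

static unsigned long long modexp(unsigned long long b, unsigned long long e,
				 unsigned long long m)
{
	unsigned long long r = 1;

	b %= m;
	while (e) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* p = 11, q = 13, n = 143, e = 7, d = 103 */
	unsigned long long p = 11, q = 13, d = 103;
	unsigned long long c = modexp(42, 7, 143);	/* encrypt m = 42 */
	unsigned long long dp = d % (p - 1);		/* dP = 3 */
	unsigned long long dq = d % (q - 1);		/* dQ = 7 */
	unsigned long long qinv = 6;			/* 13^-1 mod 11 */
	unsigned long long m1, mp, mq, h, m3;

	/* FORM1: one full-size private exponentiation */
	m1 = modexp(c, d, p * q);

	/* FORM3: two half-size exponentiations, then CRT recombination */
	mp = modexp(c, dp, p);
	mq = modexp(c, dq, q);
	h = qinv * ((mp + p - mq % p) % p) % p;
	m3 = mq + h * q;

	printf("%llu %llu\n", m1, m3);	/* both print 42 */
	return 0;
}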
21432 --- a/drivers/crypto/caam/caamrng.c
21433 +++ b/drivers/crypto/caam/caamrng.c
21434 @@ -52,7 +52,7 @@
21435  
21436  /* length of descriptors */
21437  #define DESC_JOB_O_LEN                 (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
21438 -#define DESC_RNG_LEN                   (4 * CAAM_CMD_SZ)
21439 +#define DESC_RNG_LEN                   (3 * CAAM_CMD_SZ)
21440  
21441  /* Buffer, its dma address and lock */
21442  struct buf_data {
21443 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
21444  {
21445         struct buf_data *bd;
21446  
21447 -       bd = (struct buf_data *)((char *)desc -
21448 -             offsetof(struct buf_data, hw_desc));
21449 +       bd = container_of(desc, struct buf_data, hw_desc[0]);
21450  
21451         if (err)
21452                 caam_jr_strstatus(jrdev, err);
21453 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
21454  
21455         init_sh_desc(desc, HDR_SHARE_SERIAL);
21456  
21457 -       /* Propagate errors from shared to job descriptor */
21458 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21459 -
21460         /* Generate random bytes */
21461         append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
21462  
21463 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
21464         if (err)
21465                 return err;
21466  
21467 -       err = caam_init_buf(ctx, 1);
21468 -       if (err)
21469 -               return err;
21470 -
21471 -       return 0;
21472 +       return caam_init_buf(ctx, 1);
21473  }
21474  
21475  static struct hwrng caam_rng = {
21476 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
21477                 pr_err("Job Ring Device allocation for transform failed\n");
21478                 return PTR_ERR(dev);
21479         }
21480 -       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
21481 +       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
21482         if (!rng_ctx) {
21483                 err = -ENOMEM;
21484                 goto free_caam_alloc;
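The rng_done() change above swaps open-coded offsetof() arithmetic for container_of(). A minimal userspace sketch of the idiom, with a simplified macro that mirrors only the pointer arithmetic of the kernel version (struct layout is made up):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buf_data {
	unsigned char buf[16];
	unsigned int hw_desc[4];
};

int main(void)
{
	struct buf_data bd;
	unsigned int *desc = &bd.hw_desc[0];
	/* recover the enclosing struct from a pointer to one member */
	struct buf_data *p = container_of(desc, struct buf_data, hw_desc[0]);

	printf("%d\n", p == &bd);	/* prints 1 */
	return 0;
}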
21485 --- a/drivers/crypto/caam/compat.h
21486 +++ b/drivers/crypto/caam/compat.h
21487 @@ -16,6 +16,7 @@
21488  #include <linux/of_platform.h>
21489  #include <linux/dma-mapping.h>
21490  #include <linux/io.h>
21491 +#include <linux/iommu.h>
21492  #include <linux/spinlock.h>
21493  #include <linux/rtnetlink.h>
21494  #include <linux/in.h>
21495 --- a/drivers/crypto/caam/ctrl.c
21496 +++ b/drivers/crypto/caam/ctrl.c
21497 @@ -2,40 +2,41 @@
21498   * Controller-level driver, kernel property detection, initialization
21499   *
21500   * Copyright 2008-2012 Freescale Semiconductor, Inc.
21501 + * Copyright 2017 NXP
21502   */
21503  
21504  #include <linux/device.h>
21505  #include <linux/of_address.h>
21506  #include <linux/of_irq.h>
21507 +#include <linux/sys_soc.h>
21508  
21509  #include "compat.h"
21510  #include "regs.h"
21511  #include "intern.h"
21512  #include "jr.h"
21513  #include "desc_constr.h"
21514 -#include "error.h"
21515  #include "ctrl.h"
21516  
21517  bool caam_little_end;
21518  EXPORT_SYMBOL(caam_little_end);
21519 +bool caam_imx;
21520 +EXPORT_SYMBOL(caam_imx);
21521 +bool caam_dpaa2;
21522 +EXPORT_SYMBOL(caam_dpaa2);
21523 +
21524 +#ifdef CONFIG_CAAM_QI
21525 +#include "qi.h"
21526 +#endif
21527  
21528  /*
21529   * i.MX targets tend to have clock control subsystems that can
21530   * enable/disable clocking to our device.
21531   */
21532 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
21533 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
21534 -                                               char *clk_name)
21535 -{
21536 -       return devm_clk_get(dev, clk_name);
21537 -}
21538 -#else
21539  static inline struct clk *caam_drv_identify_clk(struct device *dev,
21540                                                 char *clk_name)
21541  {
21542 -       return NULL;
21543 +       return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
21544  }
21545 -#endif
21546  
21547  /*
21548   * Descriptor to instantiate RNG State Handle 0 in normal mode and
21549 @@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi
21550                 /*
21551                  * If the corresponding bit is set, then it means the state
21552                  * handle was initialized by us, and thus it needs to be
21553 -                * deintialized as well
21554 +                * deinitialized as well
21555                  */
21556                 if ((1 << sh_idx) & state_handle_mask) {
21557                         /*
21558 @@ -303,20 +304,24 @@ static int caam_remove(struct platform_d
21559         struct device *ctrldev;
21560         struct caam_drv_private *ctrlpriv;
21561         struct caam_ctrl __iomem *ctrl;
21562 -       int ring;
21563  
21564         ctrldev = &pdev->dev;
21565         ctrlpriv = dev_get_drvdata(ctrldev);
21566         ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
21567  
21568 -       /* Remove platform devices for JobRs */
21569 -       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
21570 -               if (ctrlpriv->jrpdev[ring])
21571 -                       of_device_unregister(ctrlpriv->jrpdev[ring]);
21572 -       }
21573 +       /* Remove platform devices under the crypto node */
21574 +       of_platform_depopulate(ctrldev);
21575 +
21576 +#ifdef CONFIG_CAAM_QI
21577 +       if (ctrlpriv->qidev)
21578 +               caam_qi_shutdown(ctrlpriv->qidev);
21579 +#endif
21580  
21581 -       /* De-initialize RNG state handles initialized by this driver. */
21582 -       if (ctrlpriv->rng4_sh_init)
21583 +       /*
21584 +        * De-initialize RNG state handles initialized by this driver.
21585 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
21586 +        */
21587 +       if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
21588                 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
21589  
21590         /* Shut down debug views */
21591 @@ -331,8 +336,8 @@ static int caam_remove(struct platform_d
21592         clk_disable_unprepare(ctrlpriv->caam_ipg);
21593         clk_disable_unprepare(ctrlpriv->caam_mem);
21594         clk_disable_unprepare(ctrlpriv->caam_aclk);
21595 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21596 -
21597 +       if (ctrlpriv->caam_emi_slow)
21598 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21599         return 0;
21600  }
21601  
21602 @@ -366,11 +371,8 @@ static void kick_trng(struct platform_de
21603          */
21604         val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
21605               >> RTSDCTL_ENT_DLY_SHIFT;
21606 -       if (ent_delay <= val) {
21607 -               /* put RNG4 into run mode */
21608 -               clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
21609 -               return;
21610 -       }
21611 +       if (ent_delay <= val)
21612 +               goto start_rng;
21613  
21614         val = rd_reg32(&r4tst->rtsdctl);
21615         val = (val & ~RTSDCTL_ENT_DLY_MASK) |
21616 @@ -382,15 +384,12 @@ static void kick_trng(struct platform_de
21617         wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
21618         /* read the control register */
21619         val = rd_reg32(&r4tst->rtmctl);
21620 +start_rng:
21621         /*
21622          * select raw sampling in both entropy shifter
21623 -        * and statistical checker
21624 +        * and statistical checker; put RNG4 into run mode
21625          */
21626 -       clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
21627 -       /* put RNG4 into run mode */
21628 -       clrsetbits_32(&val, RTMCTL_PRGM, 0);
21629 -       /* write back the control register */
21630 -       wr_reg32(&r4tst->rtmctl, val);
21631 +       clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
21632  }
21633  
21634  /**
21635 @@ -411,28 +410,26 @@ int caam_get_era(void)
21636  }
21637  EXPORT_SYMBOL(caam_get_era);
21638  
21639 -#ifdef CONFIG_DEBUG_FS
21640 -static int caam_debugfs_u64_get(void *data, u64 *val)
21641 -{
21642 -       *val = caam64_to_cpu(*(u64 *)data);
21643 -       return 0;
21644 -}
21645 -
21646 -static int caam_debugfs_u32_get(void *data, u64 *val)
21647 -{
21648 -       *val = caam32_to_cpu(*(u32 *)data);
21649 -       return 0;
21650 -}
21651 -
21652 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
21653 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
21654 -#endif
21655 +static const struct of_device_id caam_match[] = {
21656 +       {
21657 +               .compatible = "fsl,sec-v4.0",
21658 +       },
21659 +       {
21660 +               .compatible = "fsl,sec4.0",
21661 +       },
21662 +       {},
21663 +};
21664 +MODULE_DEVICE_TABLE(of, caam_match);
21665  
21666  /* Probe routine for CAAM top (controller) level */
21667  static int caam_probe(struct platform_device *pdev)
21668  {
21669 -       int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21670 +       int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21671         u64 caam_id;
21672 +       static const struct soc_device_attribute imx_soc[] = {
21673 +               {.family = "Freescale i.MX"},
21674 +               {},
21675 +       };
21676         struct device *dev;
21677         struct device_node *nprop, *np;
21678         struct caam_ctrl __iomem *ctrl;
21679 @@ -452,9 +449,10 @@ static int caam_probe(struct platform_de
21680  
21681         dev = &pdev->dev;
21682         dev_set_drvdata(dev, ctrlpriv);
21683 -       ctrlpriv->pdev = pdev;
21684         nprop = pdev->dev.of_node;
21685  
21686 +       caam_imx = (bool)soc_device_match(imx_soc);
21687 +
21688         /* Enable clocking */
21689         clk = caam_drv_identify_clk(&pdev->dev, "ipg");
21690         if (IS_ERR(clk)) {
21691 @@ -483,14 +481,16 @@ static int caam_probe(struct platform_de
21692         }
21693         ctrlpriv->caam_aclk = clk;
21694  
21695 -       clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21696 -       if (IS_ERR(clk)) {
21697 -               ret = PTR_ERR(clk);
21698 -               dev_err(&pdev->dev,
21699 -                       "can't identify CAAM emi_slow clk: %d\n", ret);
21700 -               return ret;
21701 +       if (!of_machine_is_compatible("fsl,imx6ul")) {
21702 +               clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21703 +               if (IS_ERR(clk)) {
21704 +                       ret = PTR_ERR(clk);
21705 +                       dev_err(&pdev->dev,
21706 +                               "can't identify CAAM emi_slow clk: %d\n", ret);
21707 +                       return ret;
21708 +               }
21709 +               ctrlpriv->caam_emi_slow = clk;
21710         }
21711 -       ctrlpriv->caam_emi_slow = clk;
21712  
21713         ret = clk_prepare_enable(ctrlpriv->caam_ipg);
21714         if (ret < 0) {
21715 @@ -511,11 +511,13 @@ static int caam_probe(struct platform_de
21716                 goto disable_caam_mem;
21717         }
21718  
21719 -       ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21720 -       if (ret < 0) {
21721 -               dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21722 -                       ret);
21723 -               goto disable_caam_aclk;
21724 +       if (ctrlpriv->caam_emi_slow) {
21725 +               ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21726 +               if (ret < 0) {
21727 +                       dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21728 +                               ret);
21729 +                       goto disable_caam_aclk;
21730 +               }
21731         }
21732  
21733         /* Get configuration properties from device tree */
21734 @@ -542,13 +544,13 @@ static int caam_probe(struct platform_de
21735         else
21736                 BLOCK_OFFSET = PG_SIZE_64K;
21737  
21738 -       ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
21739 -       ctrlpriv->assure = (struct caam_assurance __force *)
21740 -                          ((uint8_t *)ctrl +
21741 +       ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
21742 +       ctrlpriv->assure = (struct caam_assurance __iomem __force *)
21743 +                          ((__force uint8_t *)ctrl +
21744                             BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
21745                            );
21746 -       ctrlpriv->deco = (struct caam_deco __force *)
21747 -                        ((uint8_t *)ctrl +
21748 +       ctrlpriv->deco = (struct caam_deco __iomem __force *)
21749 +                        ((__force uint8_t *)ctrl +
21750                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
21751                          );
21752  
21753 @@ -557,12 +559,17 @@ static int caam_probe(struct platform_de
21754  
21755         /*
21756          * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
21757 -        * long pointers in master configuration register
21758 +        * long pointers in master configuration register.
21759 +        * In case of DPAA 2.x, Management Complex firmware performs
21760 +        * the configuration.
21761          */
21762 -       clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21763 -                     MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21764 -                     MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21765 -                     (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
21766 +       caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
21767 +       if (!caam_dpaa2)
21768 +               clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21769 +                             MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21770 +                             MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21771 +                             (sizeof(dma_addr_t) == sizeof(u64) ?
21772 +                              MCFGR_LONG_PTR : 0));
21773  
21774         /*
21775          *  Read the Compile Time paramters and SCFGR to determine
21776 @@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
21777                               JRSTART_JR1_START | JRSTART_JR2_START |
21778                               JRSTART_JR3_START);
21779  
21780 -       if (sizeof(dma_addr_t) == sizeof(u64))
21781 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21782 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21783 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
21784 +               if (caam_dpaa2)
21785 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21786 +               else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21787 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21788                 else
21789 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21790 -       else
21791 -               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21792 -
21793 -       /*
21794 -        * Detect and enable JobRs
21795 -        * First, find out how many ring spec'ed, allocate references
21796 -        * for all, then go probe each one.
21797 -        */
21798 -       rspec = 0;
21799 -       for_each_available_child_of_node(nprop, np)
21800 -               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21801 -                   of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
21802 -                       rspec++;
21803 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21804 +       } else {
21805 +               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21806 +       }
21807 +       if (ret) {
21808 +               dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
21809 +               goto iounmap_ctrl;
21810 +       }
21811  
21812 -       ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
21813 -                                       sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
21814 -       if (ctrlpriv->jrpdev == NULL) {
21815 -               ret = -ENOMEM;
21816 +       ret = of_platform_populate(nprop, caam_match, NULL, dev);
21817 +       if (ret) {
21818 +               dev_err(dev, "JR platform devices creation error\n");
21819                 goto iounmap_ctrl;
21820         }
21821  
21822 +#ifdef CONFIG_DEBUG_FS
21823 +       /*
21824 +        * FIXME: needs better naming distinction, as some amalgamation of
21825 +        * "caam" and nprop->full_name. The OF name isn't distinctive,
21826 +        * but does separate instances
21827 +        */
21828 +       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21829 +
21830 +       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21831 +       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21832 +#endif
21833         ring = 0;
21834 -       ctrlpriv->total_jobrs = 0;
21835         for_each_available_child_of_node(nprop, np)
21836                 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21837                     of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
21838 -                       ctrlpriv->jrpdev[ring] =
21839 -                               of_platform_device_create(np, NULL, dev);
21840 -                       if (!ctrlpriv->jrpdev[ring]) {
21841 -                               pr_warn("JR%d Platform device creation error\n",
21842 -                                       ring);
21843 -                               continue;
21844 -                       }
21845 -                       ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
21846 -                                            ((uint8_t *)ctrl +
21847 +                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
21848 +                                            ((__force uint8_t *)ctrl +
21849                                              (ring + JR_BLOCK_NUMBER) *
21850                                               BLOCK_OFFSET
21851                                              );
21852                         ctrlpriv->total_jobrs++;
21853                         ring++;
21854 -       }
21855 +               }
21856  
21857 -       /* Check to see if QI present. If so, enable */
21858 -       ctrlpriv->qi_present =
21859 -                       !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
21860 -                          CTPR_MS_QI_MASK);
21861 -       if (ctrlpriv->qi_present) {
21862 -               ctrlpriv->qi = (struct caam_queue_if __force *)
21863 -                              ((uint8_t *)ctrl +
21864 +       /* Check to see if (DPAA 1.x) QI present. If so, enable */
21865 +       ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
21866 +       if (ctrlpriv->qi_present && !caam_dpaa2) {
21867 +               ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
21868 +                              ((__force uint8_t *)ctrl +
21869                                  BLOCK_OFFSET * QI_BLOCK_NUMBER
21870                                );
21871                 /* This is all that's required to physically enable QI */
21872                 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
21873 +
21874 +               /* If QMAN driver is present, init CAAM-QI backend */
21875 +#ifdef CONFIG_CAAM_QI
21876 +               ret = caam_qi_init(pdev);
21877 +               if (ret)
21878 +                       dev_err(dev, "caam qi i/f init failed: %d\n", ret);
21879 +#endif
21880         }
21881  
21882         /* If no QI and no rings specified, quit and go home */
21883 @@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
21884         /*
21885          * If SEC has RNG version >= 4 and RNG state handle has not been
21886          * already instantiated, do RNG instantiation
21887 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
21888          */
21889 -       if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21890 +       if (!caam_dpaa2 &&
21891 +           (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21892                 ctrlpriv->rng4_sh_init =
21893                         rd_reg32(&ctrl->r4tst[0].rdsta);
21894                 /*
21895 @@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
21896         /* Report "alive" for developer to see */
21897         dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
21898                  caam_get_era());
21899 -       dev_info(dev, "job rings = %d, qi = %d\n",
21900 -                ctrlpriv->total_jobrs, ctrlpriv->qi_present);
21901 +       dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
21902 +                ctrlpriv->total_jobrs, ctrlpriv->qi_present,
21903 +                caam_dpaa2 ? "yes" : "no");
21904  
21905  #ifdef CONFIG_DEBUG_FS
21906 -       /*
21907 -        * FIXME: needs better naming distinction, as some amalgamation of
21908 -        * "caam" and nprop->full_name. The OF name isn't distinctive,
21909 -        * but does separate instances
21910 -        */
21911 -       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21912 -
21913 -       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21914 -       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21915 -
21916 -       /* Controller-level - performance monitor counters */
21917 -
21918 -       ctrlpriv->ctl_rq_dequeued =
21919 -               debugfs_create_file("rq_dequeued",
21920 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21921 -                                   ctrlpriv->ctl, &perfmon->req_dequeued,
21922 -                                   &caam_fops_u64_ro);
21923 -       ctrlpriv->ctl_ob_enc_req =
21924 -               debugfs_create_file("ob_rq_encrypted",
21925 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21926 -                                   ctrlpriv->ctl, &perfmon->ob_enc_req,
21927 -                                   &caam_fops_u64_ro);
21928 -       ctrlpriv->ctl_ib_dec_req =
21929 -               debugfs_create_file("ib_rq_decrypted",
21930 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21931 -                                   ctrlpriv->ctl, &perfmon->ib_dec_req,
21932 -                                   &caam_fops_u64_ro);
21933 -       ctrlpriv->ctl_ob_enc_bytes =
21934 -               debugfs_create_file("ob_bytes_encrypted",
21935 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21936 -                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21937 -                                   &caam_fops_u64_ro);
21938 -       ctrlpriv->ctl_ob_prot_bytes =
21939 -               debugfs_create_file("ob_bytes_protected",
21940 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21941 -                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21942 -                                   &caam_fops_u64_ro);
21943 -       ctrlpriv->ctl_ib_dec_bytes =
21944 -               debugfs_create_file("ib_bytes_decrypted",
21945 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21946 -                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21947 -                                   &caam_fops_u64_ro);
21948 -       ctrlpriv->ctl_ib_valid_bytes =
21949 -               debugfs_create_file("ib_bytes_validated",
21950 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21951 -                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21952 -                                   &caam_fops_u64_ro);
21953 +       debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
21954 +                           ctrlpriv->ctl, &perfmon->req_dequeued,
21955 +                           &caam_fops_u64_ro);
21956 +       debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21957 +                           ctrlpriv->ctl, &perfmon->ob_enc_req,
21958 +                           &caam_fops_u64_ro);
21959 +       debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21960 +                           ctrlpriv->ctl, &perfmon->ib_dec_req,
21961 +                           &caam_fops_u64_ro);
21962 +       debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21963 +                           ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21964 +                           &caam_fops_u64_ro);
21965 +       debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
21966 +                           ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21967 +                           &caam_fops_u64_ro);
21968 +       debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21969 +                           ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21970 +                           &caam_fops_u64_ro);
21971 +       debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
21972 +                           ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21973 +                           &caam_fops_u64_ro);
21974  
21975         /* Controller level - global status values */
21976 -       ctrlpriv->ctl_faultaddr =
21977 -               debugfs_create_file("fault_addr",
21978 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21979 -                                   ctrlpriv->ctl, &perfmon->faultaddr,
21980 -                                   &caam_fops_u32_ro);
21981 -       ctrlpriv->ctl_faultdetail =
21982 -               debugfs_create_file("fault_detail",
21983 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21984 -                                   ctrlpriv->ctl, &perfmon->faultdetail,
21985 -                                   &caam_fops_u32_ro);
21986 -       ctrlpriv->ctl_faultstatus =
21987 -               debugfs_create_file("fault_status",
21988 -                                   S_IRUSR | S_IRGRP | S_IROTH,
21989 -                                   ctrlpriv->ctl, &perfmon->status,
21990 -                                   &caam_fops_u32_ro);
21991 +       debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
21992 +                           ctrlpriv->ctl, &perfmon->faultaddr,
21993 +                           &caam_fops_u32_ro);
21994 +       debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
21995 +                           ctrlpriv->ctl, &perfmon->faultdetail,
21996 +                           &caam_fops_u32_ro);
21997 +       debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
21998 +                           ctrlpriv->ctl, &perfmon->status,
21999 +                           &caam_fops_u32_ro);
22000  
22001         /* Internal covering keys (useful in non-secure mode only) */
22002 -       ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
22003 +       ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
22004         ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22005         ctrlpriv->ctl_kek = debugfs_create_blob("kek",
22006                                                 S_IRUSR |
22007 @@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
22008                                                 ctrlpriv->ctl,
22009                                                 &ctrlpriv->ctl_kek_wrap);
22010  
22011 -       ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
22012 +       ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
22013         ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22014         ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
22015                                                  S_IRUSR |
22016 @@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
22017                                                  ctrlpriv->ctl,
22018                                                  &ctrlpriv->ctl_tkek_wrap);
22019  
22020 -       ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
22021 +       ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
22022         ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22023         ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
22024                                                  S_IRUSR |
22025 @@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
22026         return 0;
22027  
22028  caam_remove:
22029 +#ifdef CONFIG_DEBUG_FS
22030 +       debugfs_remove_recursive(ctrlpriv->dfs_root);
22031 +#endif
22032         caam_remove(pdev);
22033         return ret;
22034  
22035  iounmap_ctrl:
22036         iounmap(ctrl);
22037  disable_caam_emi_slow:
22038 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22039 +       if (ctrlpriv->caam_emi_slow)
22040 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22041  disable_caam_aclk:
22042         clk_disable_unprepare(ctrlpriv->caam_aclk);
22043  disable_caam_mem:
22044 @@ -844,17 +829,6 @@ disable_caam_ipg:
22045         return ret;
22046  }
22047  
22048 -static struct of_device_id caam_match[] = {
22049 -       {
22050 -               .compatible = "fsl,sec-v4.0",
22051 -       },
22052 -       {
22053 -               .compatible = "fsl,sec4.0",
22054 -       },
22055 -       {},
22056 -};
22057 -MODULE_DEVICE_TABLE(of, caam_match);
22058 -
22059  static struct platform_driver caam_driver = {
22060         .driver = {
22061                 .name = "caam",
22062 --- a/drivers/crypto/caam/ctrl.h
22063 +++ b/drivers/crypto/caam/ctrl.h
22064 @@ -10,4 +10,6 @@
22065  /* Prototypes for backend-level services exposed to APIs */
22066  int caam_get_era(void);
22067  
22068 +extern bool caam_dpaa2;
22069 +
22070  #endif /* CTRL_H */
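The caam_probe() hunk above now picks the DMA mask by platform and checks the result: 49-bit addressing on DPAA 2.x parts, 40-bit on SEC v5.0, 36-bit otherwise on 64-bit builds, and 32-bit elsewhere. A tiny decision-table sketch of that policy (userspace, helper name made up):

#include <stdbool.h>
#include <stdio.h>

static int dma_mask_bits(bool addr64, bool dpaa2, bool sec_v5)
{
	if (!addr64)
		return 32;
	if (dpaa2)
		return 49;
	return sec_v5 ? 40 : 36;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       dma_mask_bits(true, true, false),	/* 49 */
	       dma_mask_bits(true, false, true),	/* 40 */
	       dma_mask_bits(true, false, false),	/* 36 */
	       dma_mask_bits(false, false, false));	/* 32 */
	return 0;
}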
22071 --- a/drivers/crypto/caam/desc.h
22072 +++ b/drivers/crypto/caam/desc.h
22073 @@ -22,12 +22,6 @@
22074  #define SEC4_SG_LEN_MASK       0x3fffffff      /* Excludes EXT and FINAL */
22075  #define SEC4_SG_OFFSET_MASK    0x00001fff
22076  
22077 -struct sec4_sg_entry {
22078 -       u64 ptr;
22079 -       u32 len;
22080 -       u32 bpid_offset;
22081 -};
22082 -
22083  /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
22084  #define MAX_CAAM_DESCSIZE      64
22085  
22086 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
22087  #define CMD_SEQ_LOAD           (0x03 << CMD_SHIFT)
22088  #define CMD_FIFO_LOAD          (0x04 << CMD_SHIFT)
22089  #define CMD_SEQ_FIFO_LOAD      (0x05 << CMD_SHIFT)
22090 +#define CMD_MOVEB              (0x07 << CMD_SHIFT)
22091  #define CMD_STORE              (0x0a << CMD_SHIFT)
22092  #define CMD_SEQ_STORE          (0x0b << CMD_SHIFT)
22093  #define CMD_FIFO_STORE         (0x0c << CMD_SHIFT)
22094 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
22095  #define HDR_ZRO                        0x00008000
22096  
22097  /* Start Index or SharedDesc Length */
22098 -#define HDR_START_IDX_MASK     0x3f
22099  #define HDR_START_IDX_SHIFT    16
22100 +#define HDR_START_IDX_MASK     (0x3f << HDR_START_IDX_SHIFT)
22101  
22102  /* If shared descriptor header, 6-bit length */
22103  #define HDR_DESCLEN_SHR_MASK   0x3f
22104 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
22105  #define HDR_PROP_DNR           0x00000800
22106  
22107  /* JobDesc/SharedDesc share property */
22108 -#define HDR_SD_SHARE_MASK      0x03
22109  #define HDR_SD_SHARE_SHIFT     8
22110 -#define HDR_JD_SHARE_MASK      0x07
22111 +#define HDR_SD_SHARE_MASK      (0x03 << HDR_SD_SHARE_SHIFT)
22112  #define HDR_JD_SHARE_SHIFT     8
22113 +#define HDR_JD_SHARE_MASK      (0x07 << HDR_JD_SHARE_SHIFT)
22114  
22115  #define HDR_SHARE_NEVER                (0x00 << HDR_SD_SHARE_SHIFT)
22116  #define HDR_SHARE_WAIT         (0x01 << HDR_SD_SHARE_SHIFT)
22117 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
22118  #define LDST_SRCDST_WORD_DECO_MATH2    (0x0a << LDST_SRCDST_SHIFT)
22119  #define LDST_SRCDST_WORD_DECO_AAD_SZ   (0x0b << LDST_SRCDST_SHIFT)
22120  #define LDST_SRCDST_WORD_DECO_MATH3    (0x0b << LDST_SRCDST_SHIFT)
22121 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
22122 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ  (0x0c << LDST_SRCDST_SHIFT)
22123  #define LDST_SRCDST_WORD_ALTDS_CLASS1  (0x0f << LDST_SRCDST_SHIFT)
22124  #define LDST_SRCDST_WORD_PKHA_A_SZ     (0x10 << LDST_SRCDST_SHIFT)
22125  #define LDST_SRCDST_WORD_PKHA_B_SZ     (0x11 << LDST_SRCDST_SHIFT)
22126 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
22127  #define FIFOLD_TYPE_PK_N       (0x08 << FIFOLD_TYPE_SHIFT)
22128  #define FIFOLD_TYPE_PK_A       (0x0c << FIFOLD_TYPE_SHIFT)
22129  #define FIFOLD_TYPE_PK_B       (0x0d << FIFOLD_TYPE_SHIFT)
22130 +#define FIFOLD_TYPE_IFIFO      (0x0f << FIFOLD_TYPE_SHIFT)
22131  
22132  /* Other types. Need to OR in last/flush bits as desired */
22133  #define FIFOLD_TYPE_MSG_MASK   (0x38 << FIFOLD_TYPE_SHIFT)
22134 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
22135  #define FIFOST_TYPE_PKHA_N      (0x08 << FIFOST_TYPE_SHIFT)
22136  #define FIFOST_TYPE_PKHA_A      (0x0c << FIFOST_TYPE_SHIFT)
22137  #define FIFOST_TYPE_PKHA_B      (0x0d << FIFOST_TYPE_SHIFT)
22138 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
22139 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
22140  #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
22141  #define FIFOST_TYPE_PKHA_E_JKEK         (0x22 << FIFOST_TYPE_SHIFT)
22142  #define FIFOST_TYPE_PKHA_E_TKEK         (0x23 << FIFOST_TYPE_SHIFT)
22143 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
22144  #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
22145  #define FIFOST_TYPE_RNGSTORE    (0x34 << FIFOST_TYPE_SHIFT)
22146  #define FIFOST_TYPE_RNGFIFO     (0x35 << FIFOST_TYPE_SHIFT)
22147 +#define FIFOST_TYPE_METADATA    (0x3e << FIFOST_TYPE_SHIFT)
22148  #define FIFOST_TYPE_SKIP        (0x3f << FIFOST_TYPE_SHIFT)
22149  
22150  /*
22151 @@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
22152  /* For non-protocol/alg-only op commands */
22153  #define OP_ALG_TYPE_SHIFT      24
22154  #define OP_ALG_TYPE_MASK       (0x7 << OP_ALG_TYPE_SHIFT)
22155 -#define OP_ALG_TYPE_CLASS1     2
22156 -#define OP_ALG_TYPE_CLASS2     4
22157 +#define OP_ALG_TYPE_CLASS1     (2 << OP_ALG_TYPE_SHIFT)
22158 +#define OP_ALG_TYPE_CLASS2     (4 << OP_ALG_TYPE_SHIFT)
22159  
22160  #define OP_ALG_ALGSEL_SHIFT    16
22161  #define OP_ALG_ALGSEL_MASK     (0xff << OP_ALG_ALGSEL_SHIFT)
22162 @@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
22163  #define OP_ALG_PKMODE_MOD_PRIMALITY    0x00f
22164  
22165  /* PKHA mode copy-memory functions */
22166 -#define OP_ALG_PKMODE_SRC_REG_SHIFT    13
22167 +#define OP_ALG_PKMODE_SRC_REG_SHIFT    17
22168  #define OP_ALG_PKMODE_SRC_REG_MASK     (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
22169  #define OP_ALG_PKMODE_DST_REG_SHIFT    10
22170  #define OP_ALG_PKMODE_DST_REG_MASK     (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
22171 @@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
22172  #define MATH_SRC1_REG2         (0x02 << MATH_SRC1_SHIFT)
22173  #define MATH_SRC1_REG3         (0x03 << MATH_SRC1_SHIFT)
22174  #define MATH_SRC1_IMM          (0x04 << MATH_SRC1_SHIFT)
22175 -#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC0_SHIFT)
22176 +#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC1_SHIFT)
22177  #define MATH_SRC1_INFIFO       (0x0a << MATH_SRC1_SHIFT)
22178  #define MATH_SRC1_OUTFIFO      (0x0b << MATH_SRC1_SHIFT)
22179  #define MATH_SRC1_ONE          (0x0c << MATH_SRC1_SHIFT)
22180 +#define MATH_SRC1_ZERO         (0x0f << MATH_SRC1_SHIFT)
22181  
22182  /* Destination selectors */
22183  #define MATH_DEST_SHIFT                8
22184 @@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
22185  /* Frame Descriptor Command for Replacement Job Descriptor */
22186  #define FD_CMD_REPLACE_JOB_DESC                                0x20000000
22187  
22188 +/* CHA Control Register bits */
22189 +#define CCTRL_RESET_CHA_ALL          0x1
22190 +#define CCTRL_RESET_CHA_AESA         0x2
22191 +#define CCTRL_RESET_CHA_DESA         0x4
22192 +#define CCTRL_RESET_CHA_AFHA         0x8
22193 +#define CCTRL_RESET_CHA_KFHA         0x10
22194 +#define CCTRL_RESET_CHA_SF8A         0x20
22195 +#define CCTRL_RESET_CHA_PKHA         0x40
22196 +#define CCTRL_RESET_CHA_MDHA         0x80
22197 +#define CCTRL_RESET_CHA_CRCA         0x100
22198 +#define CCTRL_RESET_CHA_RNG          0x200
22199 +#define CCTRL_RESET_CHA_SF9A         0x400
22200 +#define CCTRL_RESET_CHA_ZUCE         0x800
22201 +#define CCTRL_RESET_CHA_ZUCA         0x1000
22202 +#define CCTRL_UNLOAD_PK_A0           0x10000
22203 +#define CCTRL_UNLOAD_PK_A1           0x20000
22204 +#define CCTRL_UNLOAD_PK_A2           0x40000
22205 +#define CCTRL_UNLOAD_PK_A3           0x80000
22206 +#define CCTRL_UNLOAD_PK_B0           0x100000
22207 +#define CCTRL_UNLOAD_PK_B1           0x200000
22208 +#define CCTRL_UNLOAD_PK_B2           0x400000
22209 +#define CCTRL_UNLOAD_PK_B3           0x800000
22210 +#define CCTRL_UNLOAD_PK_N            0x1000000
22211 +#define CCTRL_UNLOAD_PK_A            0x4000000
22212 +#define CCTRL_UNLOAD_PK_B            0x8000000
22213 +#define CCTRL_UNLOAD_SBOX            0x10000000
22214 +
22215  #endif /* DESC_H */
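The desc.h hunks above move HDR_START_IDX_MASK, HDR_SD_SHARE_MASK and HDR_JD_SHARE_MASK to the pre-shifted convention, so a field can be tested and extracted straight from the header word. A small sketch of the resulting usage (the header value is made up):

#include <stdint.h>
#include <stdio.h>

#define HDR_START_IDX_SHIFT	16
#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)

int main(void)
{
	uint32_t hdr = 0xb0000001u | (5 << HDR_START_IDX_SHIFT);
	/* mask already includes the shift, so extraction is mask-then-shift */
	uint32_t idx = (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;

	printf("start index = %u\n", idx);	/* prints 5 */
	return 0;
}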
22216 --- a/drivers/crypto/caam/desc_constr.h
22217 +++ b/drivers/crypto/caam/desc_constr.h
22218 @@ -4,6 +4,9 @@
22219   * Copyright 2008-2012 Freescale Semiconductor, Inc.
22220   */
22221  
22222 +#ifndef DESC_CONSTR_H
22223 +#define DESC_CONSTR_H
22224 +
22225  #include "desc.h"
22226  #include "regs.h"
22227  
22228 @@ -33,38 +36,39 @@
22229  
22230  extern bool caam_little_end;
22231  
22232 -static inline int desc_len(u32 *desc)
22233 +static inline int desc_len(u32 * const desc)
22234  {
22235         return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
22236  }
22237  
22238 -static inline int desc_bytes(void *desc)
22239 +static inline int desc_bytes(void * const desc)
22240  {
22241         return desc_len(desc) * CAAM_CMD_SZ;
22242  }
22243  
22244 -static inline u32 *desc_end(u32 *desc)
22245 +static inline u32 *desc_end(u32 * const desc)
22246  {
22247         return desc + desc_len(desc);
22248  }
22249  
22250 -static inline void *sh_desc_pdb(u32 *desc)
22251 +static inline void *sh_desc_pdb(u32 * const desc)
22252  {
22253         return desc + 1;
22254  }
22255  
22256 -static inline void init_desc(u32 *desc, u32 options)
22257 +static inline void init_desc(u32 * const desc, u32 options)
22258  {
22259         *desc = cpu_to_caam32((options | HDR_ONE) + 1);
22260  }
22261  
22262 -static inline void init_sh_desc(u32 *desc, u32 options)
22263 +static inline void init_sh_desc(u32 * const desc, u32 options)
22264  {
22265         PRINT_POS;
22266         init_desc(desc, CMD_SHARED_DESC_HDR | options);
22267  }
22268  
22269 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22270 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
22271 +                                   size_t pdb_bytes)
22272  {
22273         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22274  
22275 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
22276                      options);
22277  }
22278  
22279 -static inline void init_job_desc(u32 *desc, u32 options)
22280 +static inline void init_job_desc(u32 * const desc, u32 options)
22281  {
22282         init_desc(desc, CMD_DESC_HDR | options);
22283  }
22284  
22285 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22286 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
22287 +                                    size_t pdb_bytes)
22288  {
22289         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22290  
22291         init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
22292  }
22293  
22294 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22295 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
22296  {
22297         dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
22298  
22299 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
22300                                 CAAM_PTR_SZ / CAAM_CMD_SZ);
22301  }
22302  
22303 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22304 -                                       u32 options)
22305 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
22306 +                                       int len, u32 options)
22307  {
22308         PRINT_POS;
22309         init_job_desc(desc, HDR_SHARED | options |
22310 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
22311         append_ptr(desc, ptr);
22312  }
22313  
22314 -static inline void append_data(u32 *desc, void *data, int len)
22315 +static inline void append_data(u32 * const desc, void *data, int len)
22316  {
22317         u32 *offset = desc_end(desc);
22318  
22319 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
22320                                 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
22321  }
22322  
22323 -static inline void append_cmd(u32 *desc, u32 command)
22324 +static inline void append_cmd(u32 * const desc, u32 command)
22325  {
22326         u32 *cmd = desc_end(desc);
22327  
22328 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
22329  
22330  #define append_u32 append_cmd
22331  
22332 -static inline void append_u64(u32 *desc, u64 data)
22333 +static inline void append_u64(u32 * const desc, u64 data)
22334  {
22335         u32 *offset = desc_end(desc);
22336  
22337 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
22338  }
22339  
22340  /* Write command without affecting header, and return pointer to next word */
22341 -static inline u32 *write_cmd(u32 *desc, u32 command)
22342 +static inline u32 *write_cmd(u32 * const desc, u32 command)
22343  {
22344         *desc = cpu_to_caam32(command);
22345  
22346         return desc + 1;
22347  }
22348  
22349 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22350 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
22351                                   u32 command)
22352  {
22353         append_cmd(desc, command | len);
22354 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
22355  }
22356  
22357  /* Write length after pointer, rather than inside command */
22358 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22359 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
22360                                          unsigned int len, u32 command)
22361  {
22362         append_cmd(desc, command);
22363 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
22364         append_cmd(desc, len);
22365  }
22366  
22367 -static inline void append_cmd_data(u32 *desc, void *data, int len,
22368 +static inline void append_cmd_data(u32 * const desc, void *data, int len,
22369                                    u32 command)
22370  {
22371         append_cmd(desc, command | IMMEDIATE | len);
22372 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
22373  }
22374  
22375  #define APPEND_CMD_RET(cmd, op) \
22376 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
22377 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
22378  { \
22379         u32 *cmd = desc_end(desc); \
22380         PRINT_POS; \
22381 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
22382  }
22383  APPEND_CMD_RET(jump, JUMP)
22384  APPEND_CMD_RET(move, MOVE)
22385 +APPEND_CMD_RET(moveb, MOVEB)
22386  
22387 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
22388 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
22389  {
22390         *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
22391                                   (desc_len(desc) - (jump_cmd - desc)));
22392  }
22393  
22394 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22395 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
22396  {
22397         u32 val = caam32_to_cpu(*move_cmd);
22398  
22399 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
22400  }
22401  
22402  #define APPEND_CMD(cmd, op) \
22403 -static inline void append_##cmd(u32 *desc, u32 options) \
22404 +static inline void append_##cmd(u32 * const desc, u32 options) \
22405  { \
22406         PRINT_POS; \
22407         append_cmd(desc, CMD_##op | options); \
22408 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
22409  APPEND_CMD(operation, OPERATION)
22410  
22411  #define APPEND_CMD_LEN(cmd, op) \
22412 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
22413 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
22414 +                               u32 options) \
22415  { \
22416         PRINT_POS; \
22417         append_cmd(desc, CMD_##op | len | options); \
22418 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
22419  APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
22420  
22421  #define APPEND_CMD_PTR(cmd, op) \
22422 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
22423 -                               u32 options) \
22424 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22425 +                               unsigned int len, u32 options) \
22426  { \
22427         PRINT_POS; \
22428         append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
22429 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
22430  APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
22431  APPEND_CMD_PTR(fifo_store, FIFO_STORE)
22432  
22433 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22434 -                               u32 options)
22435 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
22436 +                               unsigned int len, u32 options)
22437  {
22438         u32 cmd_src;
22439  
22440 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
22441  }
22442  
22443  #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
22444 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
22445 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
22446 +                                                dma_addr_t ptr, \
22447                                                  unsigned int len, \
22448                                                  u32 options) \
22449  { \
22450 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
22451  APPEND_SEQ_PTR_INTLEN(out, OUT)
22452  
22453  #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
22454 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22455 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22456                                          unsigned int len, u32 options) \
22457  { \
22458         PRINT_POS; \
22459 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
22460  APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
22461  
22462  #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
22463 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
22464 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
22465                                          unsigned int len, u32 options) \
22466  { \
22467         PRINT_POS; \
22468 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
22469   * the size of its type
22470   */
22471  #define APPEND_CMD_PTR_LEN(cmd, op, type) \
22472 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
22473 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22474                                 type len, u32 options) \
22475  { \
22476         PRINT_POS; \
22477 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
22478   * from length of immediate data provided, e.g., split keys
22479   */
22480  #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
22481 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22482 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22483                                          unsigned int data_len, \
22484                                          unsigned int len, u32 options) \
22485  { \
22486 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
22487  APPEND_CMD_PTR_TO_IMM2(key, KEY);
22488  
22489  #define APPEND_CMD_RAW_IMM(cmd, op, type) \
22490 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
22491 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
22492                                              u32 options) \
22493  { \
22494         PRINT_POS; \
22495 @@ -426,3 +434,66 @@ do { \
22496         APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
22497  #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
22498         APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
22499 +
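Taken together, these append helpers build a descriptor in place while the header tracks its length. A minimal sketch of the pattern (illustrative only: flag values are elided as 0, and the buffer sizing and DMA mapping are assumed to be handled by the caller):

    /* Sketch: compose a trivial job descriptor with the helpers above. */
    static void example_build_desc(u32 *desc, dma_addr_t src, dma_addr_t dst)
    {
            init_job_desc(desc, 0);              /* header; length starts at 1 */
            append_fifo_load(desc, src, 16, 0);  /* FIFO LOAD of 16 input bytes */
            append_operation(desc, 0);           /* OPERATION command, flags elided */
            append_fifo_store(desc, dst, 16, 0); /* FIFO STORE of 16 output bytes */
            /* desc_bytes(desc) now gives the length to hand to the job ring */
    }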
22500 +/**
22501 + * struct alginfo - Container for algorithm details
22502 + * @algtype: algorithm selector; for valid values, see documentation of the
22503 + *           functions where it is used.
22504 + * @keylen: length of the provided algorithm key, in bytes
22505 + * @keylen_pad: padded length of the provided algorithm key, in bytes
22506 + * @key_virt: virtual address of the key, used when @key_inline is true
22507 + * @key_dma: dma (bus) address of the key, used when @key_inline is false
22508 + * @key_inline: true - key can be inlined in the descriptor; false - key is
22509 + *              referenced by the descriptor
22510 + */
22511 +struct alginfo {
22512 +       u32 algtype;
22513 +       unsigned int keylen;
22514 +       unsigned int keylen_pad;
22515 +       union {
22516 +               dma_addr_t key_dma;
22517 +               void *key_virt;
22518 +       };
22519 +       bool key_inline;
22520 +};
22521 +
22522 +/**
22523 + * desc_inline_query() - Provide indications on which data items can be inlined
22524 + *                       and which shall be referenced in a shared descriptor.
22525 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
22526 + *               excluding the data items to be inlined (or corresponding
22527 + *               pointer if an item is not inlined). Each cnstr_* function that
22528 + *               generates descriptors should have a define specifying the
22529 + *               corresponding length.
22530 + * @jd_len: Maximum length of the job descriptor(s) that will be used
22531 + *          together with the shared descriptor.
22532 + * @data_len: Array of lengths of the data items to be considered for inlining
22533 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
22534 + *            otherwise.
22535 + * @count: Number of data items (size of @data_len array); must be <= 32
22536 + *
22537 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
22538 + *         check @inl_mask for details.
22539 + */
22540 +static inline int desc_inline_query(unsigned int sd_base_len,
22541 +                                   unsigned int jd_len, unsigned int *data_len,
22542 +                                   u32 *inl_mask, unsigned int count)
22543 +{
22544 +       int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
22545 +       unsigned int i;
22546 +
22547 +       *inl_mask = 0;
22548 +       for (i = 0; (i < count) && (rem_bytes > 0); i++) {
22549 +               if (rem_bytes - (int)(data_len[i] +
22550 +                       (count - i - 1) * CAAM_PTR_SZ) >= 0) {
22551 +                       rem_bytes -= data_len[i];
22552 +                       *inl_mask |= (1 << i);
22553 +               } else {
22554 +                       rem_bytes -= CAAM_PTR_SZ;
22555 +               }
22556 +       }
22557 +
22558 +       return (rem_bytes >= 0) ? 0 : -1;
22559 +}
22560 +
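A hedged sketch of the intended call pattern: a shared-descriptor constructor asks desc_inline_query() whether its (single) key fits the descriptor budget, then records the outcome in struct alginfo. DESC_BASE_LEN and DESC_JOB_IO_LEN stand in for the per-algorithm defines the comment above refers to:

    static int example_choose_key_placement(struct alginfo *adata,
                                            dma_addr_t key_dma, void *key_virt)
    {
            unsigned int data_len = adata->keylen_pad;  /* one candidate item */
            u32 inl_mask;

            if (desc_inline_query(DESC_BASE_LEN, DESC_JOB_IO_LEN,
                                  &data_len, &inl_mask, 1) < 0)
                    return -EINVAL;  /* fits neither inline nor by reference */

            adata->key_inline = !!(inl_mask & BIT(0));
            if (adata->key_inline)
                    adata->key_virt = key_virt;
            else
                    adata->key_dma = key_dma;

            return 0;
    }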
22561 +#endif /* DESC_CONSTR_H */
22562 --- /dev/null
22563 +++ b/drivers/crypto/caam/dpseci.c
22564 @@ -0,0 +1,859 @@
22565 +/*
22566 + * Copyright 2013-2016 Freescale Semiconductor Inc.
22567 + * Copyright 2017 NXP
22568 + *
22569 + * Redistribution and use in source and binary forms, with or without
22570 + * modification, are permitted provided that the following conditions are met:
22571 + *     * Redistributions of source code must retain the above copyright
22572 + *      notice, this list of conditions and the following disclaimer.
22573 + *     * Redistributions in binary form must reproduce the above copyright
22574 + *      notice, this list of conditions and the following disclaimer in the
22575 + *      documentation and/or other materials provided with the distribution.
22576 + *     * Neither the names of the above-listed copyright holders nor the
22577 + *      names of any contributors may be used to endorse or promote products
22578 + *      derived from this software without specific prior written permission.
22579 + *
22580 + *
22581 + * ALTERNATIVELY, this software may be distributed under the terms of the
22582 + * GNU General Public License ("GPL") as published by the Free Software
22583 + * Foundation, either version 2 of that License or (at your option) any
22584 + * later version.
22585 + *
22586 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22587 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22588 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22589 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22590 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22591 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22592 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22593 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22594 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22595 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22596 + * POSSIBILITY OF SUCH DAMAGE.
22597 + */
22598 +
22599 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
22600 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
22601 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
22602 +#include "dpseci.h"
22603 +#include "dpseci_cmd.h"
22604 +
22605 +/**
22606 + * dpseci_open() - Open a control session for the specified object
22607 + * @mc_io:     Pointer to MC portal's I/O object
22608 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22609 + * @dpseci_id: DPSECI unique ID
22610 + * @token:     Returned token; use in subsequent API calls
22611 + *
22612 + * This function can be used to open a control session for an already created
22613 + * object; an object may have been declared in the DPL or created by calling
22614 + * the dpseci_create() function.
22615 + * This function returns a unique authentication token, associated with the
22616 + * specific object ID and MC portal; the token must be used in all
22617 + * subsequent commands for this object.
22618 + *
22619 + * Return:     '0' on success, error code otherwise
22620 + */
22621 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
22622 +               u16 *token)
22623 +{
22624 +       struct mc_command cmd = { 0 };
22625 +       struct dpseci_cmd_open *cmd_params;
22626 +       int err;
22627 +
22628 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
22629 +                                         cmd_flags,
22630 +                                         0);
22631 +       cmd_params = (struct dpseci_cmd_open *)cmd.params;
22632 +       cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
22633 +       err = mc_send_command(mc_io, &cmd);
22634 +       if (err)
22635 +               return err;
22636 +
22637 +       *token = mc_cmd_hdr_read_token(&cmd);
22638 +
22639 +       return 0;
22640 +}
22641 +
22642 +/**
22643 + * dpseci_close() - Close the control session of the object
22644 + * @mc_io:     Pointer to MC portal's I/O object
22645 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22646 + * @token:     Token of DPSECI object
22647 + *
22648 + * After this function is called, no further operations are allowed on the
22649 + * object without opening a new control session.
22650 + *
22651 + * Return:     '0' on success, error code otherwise
22652 + */
22653 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22654 +{
22655 +       struct mc_command cmd = { 0 };
22656 +
22657 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
22658 +                                         cmd_flags,
22659 +                                         token);
22660 +       return mc_send_command(mc_io, &cmd);
22661 +}
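The open/close pair brackets every other per-object call; a minimal usage sketch (the mc_io portal is assumed to come from the fsl-mc bus probe path):

    static int example_dpseci_session(struct fsl_mc_io *mc_io, int dpseci_id)
    {
            u16 token;
            int err;

            err = dpseci_open(mc_io, 0, dpseci_id, &token);
            if (err)
                    return err;

            err = dpseci_enable(mc_io, 0, token);  /* any per-object command */

            dpseci_close(mc_io, 0, token);         /* token is now invalid */
            return err;
    }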
22662 +
22663 +/**
22664 + * dpseci_create() - Create the DPSECI object
22665 + * @mc_io:     Pointer to MC portal's I/O object
22666 + * @dprc_token:        Parent container token; '0' for default container
22667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22668 + * @cfg:       Configuration structure
22669 + * @obj_id:    returned object id
22670 + *
22671 + * Create the DPSECI object, allocate required resources and perform required
22672 + * initialization.
22673 + *
22674 + * The object can be created either by declaring it in the DPL file, or by
22675 + * calling this function.
22676 + *
22677 + * The function accepts an authentication token of a parent container that this
22678 + * object should be assigned to. The token can be '0', in which case the
22679 + * object is assigned to the default container.
22680 + * The newly created object can be opened with the returned object id and using
22681 + * the container's associated tokens and MC portals.
22682 + *
22683 + * Return:     '0' on success, error code otherwise
22684 + */
22685 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22686 +                 const struct dpseci_cfg *cfg, u32 *obj_id)
22687 +{
22688 +       struct mc_command cmd = { 0 };
22689 +       struct dpseci_cmd_create *cmd_params;
22690 +       int i, err;
22691 +
22692 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
22693 +                                         cmd_flags,
22694 +                                         dprc_token);
22695 +       cmd_params = (struct dpseci_cmd_create *)cmd.params;
22696 +       for (i = 0; i < 8; i++)
22697 +               cmd_params->priorities[i] = cfg->priorities[i];
22698 +       cmd_params->num_tx_queues = cfg->num_tx_queues;
22699 +       cmd_params->num_rx_queues = cfg->num_rx_queues;
22700 +       cmd_params->options = cpu_to_le32(cfg->options);
22701 +       err = mc_send_command(mc_io, &cmd);
22702 +       if (err)
22703 +               return err;
22704 +
22705 +       *obj_id = mc_cmd_read_object_id(&cmd);
22706 +
22707 +       return 0;
22708 +}
22709 +
22710 +/**
22711 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
22712 + * @mc_io:     Pointer to MC portal's I/O object
22713 + * @dprc_token: Parent container token; '0' for default container
22714 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22715 + * @object_id: The object id; it must be a valid id within the container that
22716 + *             created this object
22717 + *
22718 + * The function accepts the authentication token of the parent container that
22719 + * created the object (not the one that currently owns the object). The object
22720 + * is searched for within the parent using the provided 'object_id'.
22721 + * All tokens to the object must be closed before calling destroy.
22722 + *
22723 + * Return:     '0' on success, error code otherwise
22724 + */
22725 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22726 +                  u32 object_id)
22727 +{
22728 +       struct mc_command cmd = { 0 };
22729 +       struct dpseci_cmd_destroy *cmd_params;
22730 +
22731 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
22732 +                                         cmd_flags,
22733 +                                         dprc_token);
22734 +       cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
22735 +       cmd_params->object_id = cpu_to_le32(object_id);
22736 +
22737 +       return mc_send_command(mc_io, &cmd);
22738 +}
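A sketch of the create/destroy lifecycle using the dpseci_cfg layout declared in dpseci.h below (queue counts and priorities are illustrative):

    static int example_dpseci_lifecycle(struct fsl_mc_io *mc_io)
    {
            struct dpseci_cfg cfg = {
                    .num_tx_queues = 2,
                    .num_rx_queues = 2,
                    .priorities = { 1, 2 },  /* valid priorities are 1-8 */
            };
            u32 obj_id;
            int err;

            err = dpseci_create(mc_io, 0 /* default container */, 0, &cfg,
                                &obj_id);
            if (err)
                    return err;

            /* ... open a session on obj_id and use the object ... */

            return dpseci_destroy(mc_io, 0, 0, obj_id);
    }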
22739 +
22740 +/**
22741 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
22742 + * @mc_io:     Pointer to MC portal's I/O object
22743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22744 + * @token:     Token of DPSECI object
22745 + *
22746 + * Return:     '0' on success, error code otherwise
22747 + */
22748 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22749 +{
22750 +       struct mc_command cmd = { 0 };
22751 +
22752 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
22753 +                                         cmd_flags,
22754 +                                         token);
22755 +       return mc_send_command(mc_io, &cmd);
22756 +}
22757 +
22758 +/**
22759 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
22760 + * @mc_io:     Pointer to MC portal's I/O object
22761 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22762 + * @token:     Token of DPSECI object
22763 + *
22764 + * Return:     '0' on success, error code otherwise
22765 + */
22766 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22767 +{
22768 +       struct mc_command cmd = { 0 };
22769 +
22770 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
22771 +                                         cmd_flags,
22772 +                                         token);
22773 +
22774 +       return mc_send_command(mc_io, &cmd);
22775 +}
22776 +
22777 +/**
22778 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
22779 + * @mc_io:     Pointer to MC portal's I/O object
22780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22781 + * @token:     Token of DPSECI object
22782 + * @en:                Returns '1' if object is enabled; '0' otherwise
22783 + *
22784 + * Return:     '0' on success, error code otherwise
22785 + */
22786 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22787 +                     int *en)
22788 +{
22789 +       struct mc_command cmd = { 0 };
22790 +       struct dpseci_rsp_is_enabled *rsp_params;
22791 +       int err;
22792 +
22793 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
22794 +                                         cmd_flags,
22795 +                                         token);
22796 +       err = mc_send_command(mc_io, &cmd);
22797 +       if (err)
22798 +               return err;
22799 +
22800 +       rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
22801 +       *en = le32_to_cpu(rsp_params->is_enabled);
22802 +
22803 +       return 0;
22804 +}
22805 +
22806 +/**
22807 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
22808 + * @mc_io:     Pointer to MC portal's I/O object
22809 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22810 + * @token:     Token of DPSECI object
22811 + *
22812 + * Return:     '0' on success, error code otherwise
22813 + */
22814 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22815 +{
22816 +       struct mc_command cmd = { 0 };
22817 +
22818 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
22819 +                                         cmd_flags,
22820 +                                         token);
22821 +
22822 +       return mc_send_command(mc_io, &cmd);
22823 +}
22824 +
22825 +/**
22826 + * dpseci_get_irq_enable() - Get overall interrupt state
22827 + * @mc_io:     Pointer to MC portal's I/O object
22828 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22829 + * @token:     Token of DPSECI object
22830 + * @irq_index: The interrupt index to configure
22831 + * @en:                Returned interrupt state - enable = 1, disable = 0
22832 + *
22833 + * Return:     '0' on success, error code otherwise
22834 + */
22835 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22836 +                         u8 irq_index, u8 *en)
22837 +{
22838 +       struct mc_command cmd = { 0 };
22839 +       struct dpseci_cmd_irq_enable *cmd_params;
22840 +       struct dpseci_rsp_get_irq_enable *rsp_params;
22841 +       int err;
22842 +
22843 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
22844 +                                         cmd_flags,
22845 +                                         token);
22846 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22847 +       cmd_params->irq_index = irq_index;
22848 +       err = mc_send_command(mc_io, &cmd);
22849 +       if (err)
22850 +               return err;
22851 +
22852 +       rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
22853 +       *en = rsp_params->enable_state;
22854 +
22855 +       return 0;
22856 +}
22857 +
22858 +/**
22859 + * dpseci_set_irq_enable() - Set overall interrupt state.
22860 + * @mc_io:     Pointer to MC portal's I/O object
22861 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22862 + * @token:     Token of DPSECI object
22863 + * @irq_index: The interrupt index to configure
22864 + * @en:                Interrupt state - enable = 1, disable = 0
22865 + *
22866 + * Allows GPP software to control when interrupts are generated.
22867 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22868 + * overall interrupt state: if the interrupt is disabled, none of its causes
22869 + * can assert the interrupt.
22870 + *
22871 + * Return:     '0' on success, error code otherwise
22872 + */
22873 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22874 +                         u8 irq_index, u8 en)
22875 +{
22876 +       struct mc_command cmd = { 0 };
22877 +       struct dpseci_cmd_irq_enable *cmd_params;
22878 +
22879 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
22880 +                                         cmd_flags,
22881 +                                         token);
22882 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22883 +       cmd_params->irq_index = irq_index;
22884 +       cmd_params->enable_state = en;
22885 +
22886 +       return mc_send_command(mc_io, &cmd);
22887 +}
22888 +
22889 +/**
22890 + * dpseci_get_irq_mask() - Get interrupt mask.
22891 + * @mc_io:     Pointer to MC portal's I/O object
22892 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22893 + * @token:     Token of DPSECI object
22894 + * @irq_index: The interrupt index to configure
22895 + * @mask:      Returned event mask to trigger interrupt
22896 + *
22897 + * Every interrupt can have up to 32 causes and the interrupt model supports
22898 + * masking/unmasking each cause independently.
22899 + *
22900 + * Return:     '0' on success, error code otherwise
22901 + */
22902 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22903 +                       u8 irq_index, u32 *mask)
22904 +{
22905 +       struct mc_command cmd = { 0 };
22906 +       struct dpseci_cmd_irq_mask *cmd_params;
22907 +       int err;
22908 +
22909 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
22910 +                                         cmd_flags,
22911 +                                         token);
22912 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22913 +       cmd_params->irq_index = irq_index;
22914 +       err = mc_send_command(mc_io, &cmd);
22915 +       if (err)
22916 +               return err;
22917 +
22918 +       *mask = le32_to_cpu(cmd_params->mask);
22919 +
22920 +       return 0;
22921 +}
22922 +
22923 +/**
22924 + * dpseci_set_irq_mask() - Set interrupt mask.
22925 + * @mc_io:     Pointer to MC portal's I/O object
22926 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22927 + * @token:     Token of DPSECI object
22928 + * @irq_index: The interrupt index to configure
22929 + * @mask:      event mask to trigger interrupt;
22930 + *             each bit:
22931 + *                     0 = ignore event
22932 + *                     1 = consider event for asserting IRQ
22933 + *
22934 + * Every interrupt can have up to 32 causes and the interrupt model supports
22935 + * masking/unmasking each cause independently.
22936 + *
22937 + * Return:     '0' on success, error code otherwise
22938 + */
22939 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22940 +                       u8 irq_index, u32 mask)
22941 +{
22942 +       struct mc_command cmd = { 0 };
22943 +       struct dpseci_cmd_irq_mask *cmd_params;
22944 +
22945 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
22946 +                                         cmd_flags,
22947 +                                         token);
22948 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22949 +       cmd_params->mask = cpu_to_le32(mask);
22950 +       cmd_params->irq_index = irq_index;
22951 +
22952 +       return mc_send_command(mc_io, &cmd);
22953 +}
22954 +
22955 +/**
22956 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
22957 + * @mc_io:     Pointer to MC portal's I/O object
22958 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22959 + * @token:     Token of DPSECI object
22960 + * @irq_index: The interrupt index to configure
22961 + * @status:    Returned interrupt status - one bit per cause:
22962 + *                     0 = no interrupt pending
22963 + *                     1 = interrupt pending
22964 + *
22965 + * Return:     '0' on success, error code otherwise
22966 + */
22967 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22968 +                         u8 irq_index, u32 *status)
22969 +{
22970 +       struct mc_command cmd = { 0 };
22971 +       struct dpseci_cmd_irq_status *cmd_params;
22972 +       int err;
22973 +
22974 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
22975 +                                         cmd_flags,
22976 +                                         token);
22977 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
22978 +       cmd_params->status = cpu_to_le32(*status);
22979 +       cmd_params->irq_index = irq_index;
22980 +       err = mc_send_command(mc_io, &cmd);
22981 +       if (err)
22982 +               return err;
22983 +
22984 +       *status = le32_to_cpu(cmd_params->status);
22985 +
22986 +       return 0;
22987 +}
22988 +
22989 +/**
22990 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
22991 + * @mc_io:     Pointer to MC portal's I/O object
22992 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22993 + * @token:     Token of DPSECI object
22994 + * @irq_index: The interrupt index to configure
22995 + * @status:    bits to clear (W1C) - one bit per cause:
22996 + *                     0 = don't change
22997 + *                     1 = clear status bit
22998 + *
22999 + * Return:     '0' on success, error code otherwise
23000 + */
23001 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23002 +                           u8 irq_index, u32 status)
23003 +{
23004 +       struct mc_command cmd = { 0 };
23005 +       struct dpseci_cmd_irq_status *cmd_params;
23006 +
23007 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
23008 +                                         cmd_flags,
23009 +                                         token);
23010 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23011 +       cmd_params->status = cpu_to_le32(status);
23012 +       cmd_params->irq_index = irq_index;
23013 +
23014 +       return mc_send_command(mc_io, &cmd);
23015 +}
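The four IRQ calls compose as below. Note that dpseci_get_irq_status() sends the current *status value in the command, so the variable must be initialized before the call; a hedged sketch:

    static int example_dpseci_irq(struct fsl_mc_io *mc_io, u16 token)
    {
            u32 status = 0;  /* sent in the command, so initialize it */
            int err;

            err = dpseci_set_irq_mask(mc_io, 0, token, 0, 0x1); /* cause 0 only */
            if (!err)
                    err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
            if (err)
                    return err;

            /* later, typically from the interrupt handler: */
            err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
            if (!err && status)
                    err = dpseci_clear_irq_status(mc_io, 0, token, 0, status);

            return err;
    }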
23016 +
23017 +/**
23018 + * dpseci_get_attributes() - Retrieve DPSECI attributes
23019 + * @mc_io:     Pointer to MC portal's I/O object
23020 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23021 + * @token:     Token of DPSECI object
23022 + * @attr:      Returned object's attributes
23023 + *
23024 + * Return:     '0' on success, error code otherwise
23025 + */
23026 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23027 +                         struct dpseci_attr *attr)
23028 +{
23029 +       struct mc_command cmd = { 0 };
23030 +       struct dpseci_rsp_get_attributes *rsp_params;
23031 +       int err;
23032 +
23033 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
23034 +                                         cmd_flags,
23035 +                                         token);
23036 +       err = mc_send_command(mc_io, &cmd);
23037 +       if (err)
23038 +               return err;
23039 +
23040 +       rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
23041 +       attr->id = le32_to_cpu(rsp_params->id);
23042 +       attr->num_tx_queues = rsp_params->num_tx_queues;
23043 +       attr->num_rx_queues = rsp_params->num_rx_queues;
23044 +       attr->options = le32_to_cpu(rsp_params->options);
23045 +
23046 +       return 0;
23047 +}
23048 +
23049 +/**
23050 + * dpseci_set_rx_queue() - Set Rx queue configuration
23051 + * @mc_io:     Pointer to MC portal's I/O object
23052 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23053 + * @token:     Token of DPSECI object
23054 + * @queue:     Select the queue relative to the number of priorities
23055 + *             configured at DPSECI creation; use DPSECI_ALL_QUEUES to
23056 + *             configure all Rx queues identically.
23057 + * @cfg:       Rx queue configuration
23058 + *
23059 + * Return:     '0' on success, error code otherwise
23060 + */
23061 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23062 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg)
23063 +{
23064 +       struct mc_command cmd = { 0 };
23065 +       struct dpseci_cmd_queue *cmd_params;
23066 +
23067 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
23068 +                                         cmd_flags,
23069 +                                         token);
23070 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23071 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23072 +       cmd_params->priority = cfg->dest_cfg.priority;
23073 +       cmd_params->queue = queue;
23074 +       cmd_params->dest_type = cfg->dest_cfg.dest_type;
23075 +       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
23076 +       cmd_params->options = cpu_to_le32(cfg->options);
23077 +       cmd_params->order_preservation_en =
23078 +               cpu_to_le32(cfg->order_preservation_en);
23079 +
23080 +       return mc_send_command(mc_io, &cmd);
23081 +}
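A sketch of steering all Rx queues to a DPIO; the DPSECI_QUEUE_OPT_* flag names are assumed from the matching dpseci.h declarations rather than shown in this hunk:

    static int example_dpseci_rx_to_dpio(struct fsl_mc_io *mc_io, u16 token,
                                         int dpio_id, u64 ctx)
    {
            struct dpseci_rx_queue_cfg cfg = {
                    .options = DPSECI_QUEUE_OPT_DEST |     /* assumed names */
                               DPSECI_QUEUE_OPT_USER_CTX,
                    .dest_cfg = {
                            .dest_type = DPSECI_DEST_DPIO,
                            .dest_id = dpio_id,
                            .priority = 0,
                    },
                    .user_ctx = ctx,  /* returned with each dequeued frame */
            };

            return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
    }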
23082 +
23083 +/**
23084 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
23085 + * @mc_io:     Pointer to MC portal's I/O object
23086 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23087 + * @token:     Token of DPSECI object
23088 + * @queue:     Select the queue relative to the number of priorities
23089 + *             configured at DPSECI creation
23090 + * @attr:      Returned Rx queue attributes
23091 + *
23092 + * Return:     '0' on success, error code otherwise
23093 + */
23094 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23095 +                       u8 queue, struct dpseci_rx_queue_attr *attr)
23096 +{
23097 +       struct mc_command cmd = { 0 };
23098 +       struct dpseci_cmd_queue *cmd_params;
23099 +       int err;
23100 +
23101 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
23102 +                                         cmd_flags,
23103 +                                         token);
23104 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23105 +       cmd_params->queue = queue;
23106 +       err = mc_send_command(mc_io, &cmd);
23107 +       if (err)
23108 +               return err;
23109 +
23110 +       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
23111 +       attr->dest_cfg.priority = cmd_params->priority;
23112 +       attr->dest_cfg.dest_type = cmd_params->dest_type;
23113 +       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
23114 +       attr->fqid = le32_to_cpu(cmd_params->fqid);
23115 +       attr->order_preservation_en =
23116 +               le32_to_cpu(cmd_params->order_preservation_en);
23117 +
23118 +       return 0;
23119 +}
23120 +
23121 +/**
23122 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
23123 + * @mc_io:     Pointer to MC portal's I/O object
23124 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23125 + * @token:     Token of DPSECI object
23126 + * @queue:     Select the queue relative to the number of priorities
23127 + *             configured at DPSECI creation
23128 + * @attr:      Returned Tx queue attributes
23129 + *
23130 + * Return:     '0' on success, error code otherwise
23131 + */
23132 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23133 +                       u8 queue, struct dpseci_tx_queue_attr *attr)
23134 +{
23135 +       struct mc_command cmd = { 0 };
23136 +       struct dpseci_cmd_queue *cmd_params;
23137 +       struct dpseci_rsp_get_tx_queue *rsp_params;
23138 +       int err;
23139 +
23140 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
23141 +                                         cmd_flags,
23142 +                                         token);
23143 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23144 +       cmd_params->queue = queue;
23145 +       err = mc_send_command(mc_io, &cmd);
23146 +       if (err)
23147 +               return err;
23148 +
23149 +       rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
23150 +       attr->fqid = le32_to_cpu(rsp_params->fqid);
23151 +       attr->priority = rsp_params->priority;
23152 +
23153 +       return 0;
23154 +}
23155 +
23156 +/**
23157 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
23158 + * @mc_io:     Pointer to MC portal's I/O object
23159 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23160 + * @token:     Token of DPSECI object
23161 + * @attr:      Returned SEC attributes
23162 + *
23163 + * Return:     '0' on success, error code otherwise
23164 + */
23165 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23166 +                       struct dpseci_sec_attr *attr)
23167 +{
23168 +       struct mc_command cmd = { 0 };
23169 +       struct dpseci_rsp_get_sec_attr *rsp_params;
23170 +       int err;
23171 +
23172 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
23173 +                                         cmd_flags,
23174 +                                         token);
23175 +       err = mc_send_command(mc_io, &cmd);
23176 +       if (err)
23177 +               return err;
23178 +
23179 +       rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
23180 +       attr->ip_id = le16_to_cpu(rsp_params->ip_id);
23181 +       attr->major_rev = rsp_params->major_rev;
23182 +       attr->minor_rev = rsp_params->minor_rev;
23183 +       attr->era = rsp_params->era;
23184 +       attr->deco_num = rsp_params->deco_num;
23185 +       attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
23186 +       attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
23187 +       attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
23188 +       attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
23189 +       attr->crc_acc_num = rsp_params->crc_acc_num;
23190 +       attr->pk_acc_num = rsp_params->pk_acc_num;
23191 +       attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
23192 +       attr->rng_acc_num = rsp_params->rng_acc_num;
23193 +       attr->md_acc_num = rsp_params->md_acc_num;
23194 +       attr->arc4_acc_num = rsp_params->arc4_acc_num;
23195 +       attr->des_acc_num = rsp_params->des_acc_num;
23196 +       attr->aes_acc_num = rsp_params->aes_acc_num;
23197 +
23198 +       return 0;
23199 +}
23200 +
23201 +/**
23202 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
23203 + * @mc_io:     Pointer to MC portal's I/O object
23204 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23205 + * @token:     Token of DPSECI object
23206 + * @counters:  Returned SEC counters
23207 + *
23208 + * Return:     '0' on success, error code otherwise
23209 + */
23210 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23211 +                           struct dpseci_sec_counters *counters)
23212 +{
23213 +       struct mc_command cmd = { 0 };
23214 +       struct dpseci_rsp_get_sec_counters *rsp_params;
23215 +       int err;
23216 +
23217 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
23218 +                                         cmd_flags,
23219 +                                         token);
23220 +       err = mc_send_command(mc_io, &cmd);
23221 +       if (err)
23222 +               return err;
23223 +
23224 +       rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
23225 +       counters->dequeued_requests =
23226 +               le64_to_cpu(rsp_params->dequeued_requests);
23227 +       counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
23228 +       counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
23229 +       counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
23230 +       counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
23231 +       counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
23232 +       counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
23233 +
23234 +       return 0;
23235 +}
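A small consumer of the counters, e.g. for a debug print (the dev pointer is assumed to be the DPSECI device):

    static void example_dump_sec_counters(struct fsl_mc_io *mc_io, u16 token,
                                          struct device *dev)
    {
            struct dpseci_sec_counters cnt;

            if (!dpseci_get_sec_counters(mc_io, 0, token, &cnt))
                    dev_info(dev, "SEC: %llu requests dequeued, %llu bytes decrypted\n",
                             cnt.dequeued_requests, cnt.ib_dec_bytes);
    }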
23236 +
23237 +/**
23238 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
23239 + * @mc_io:     Pointer to MC portal's I/O object
23240 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23241 + * @major_ver: Major version of data path sec API
23242 + * @minor_ver: Minor version of data path sec API
23243 + *
23244 + * Return:     '0' on success, error code otherwise
23245 + */
23246 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23247 +                          u16 *major_ver, u16 *minor_ver)
23248 +{
23249 +       struct mc_command cmd = { 0 };
23250 +       struct dpseci_rsp_get_api_version *rsp_params;
23251 +       int err;
23252 +
23253 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
23254 +                                         cmd_flags, 0);
23255 +       err = mc_send_command(mc_io, &cmd);
23256 +       if (err)
23257 +               return err;
23258 +
23259 +       rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
23260 +       *major_ver = le16_to_cpu(rsp_params->major);
23261 +       *minor_ver = le16_to_cpu(rsp_params->minor);
23262 +
23263 +       return 0;
23264 +}
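A typical probe-time compatibility check; DPSECI_VER_MAJOR is assumed to be the driver's compiled-in major version define:

    static int example_check_dpseci_version(struct fsl_mc_io *mc_io,
                                            struct device *dev)
    {
            u16 major, minor;
            int err;

            err = dpseci_get_api_version(mc_io, 0, &major, &minor);
            if (err)
                    return err;

            if (major != DPSECI_VER_MAJOR) {  /* assumed define */
                    dev_err(dev, "dpseci API %u.%u not supported\n",
                            major, minor);
                    return -EOPNOTSUPP;
            }

            return 0;
    }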
23265 +
23266 +/**
23267 + * dpseci_set_opr() - Set Order Restoration configuration
23268 + * @mc_io:     Pointer to MC portal's I/O object
23269 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23270 + * @token:     Token of DPSECI object
23271 + * @index:     The queue index
23272 + * @options:   Configuration mode options; can be OPR_OPT_CREATE or
23273 + *             OPR_OPT_RETIRE
23274 + * @cfg:       Configuration options for the OPR
23275 + *
23276 + * Return:     '0' on success, error code otherwise
23277 + */
23278 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23279 +                  u8 options, struct opr_cfg *cfg)
23280 +{
23281 +       struct mc_command cmd = { 0 };
23282 +       struct dpseci_cmd_opr *cmd_params;
23283 +
23284 +       cmd.header = mc_encode_cmd_header(
23285 +                       DPSECI_CMDID_SET_OPR,
23286 +                       cmd_flags,
23287 +                       token);
23288 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23289 +       cmd_params->index = index;
23290 +       cmd_params->options = options;
23291 +       cmd_params->oloe = cfg->oloe;
23292 +       cmd_params->oeane = cfg->oeane;
23293 +       cmd_params->olws = cfg->olws;
23294 +       cmd_params->oa = cfg->oa;
23295 +       cmd_params->oprrws = cfg->oprrws;
23296 +
23297 +       return mc_send_command(mc_io, &cmd);
23298 +}
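A hedged sketch of creating an OPR on one queue; the opr_cfg field values are illustrative only (their encodings follow the DPAA2 Order Point Record definition in dpopr.h):

    static int example_create_opr(struct fsl_mc_io *mc_io, u16 token, u8 queue)
    {
            struct opr_cfg cfg = {
                    .olws = 0,    /* late-arrival window size, encoded value */
                    .oa = 0,      /* auto-advance disabled (illustrative) */
                    .oprrws = 3,  /* restoration window size, encoded value */
            };

            return dpseci_set_opr(mc_io, 0, token, queue, OPR_OPT_CREATE, &cfg);
    }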
23299 +
23300 +/**
23301 + * dpseci_get_opr() - Retrieve Order Restoration config and query
23302 + * @mc_io:     Pointer to MC portal's I/O object
23303 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23304 + * @token:     Token of DPSECI object
23305 + * @index:     The queue index
23306 + * @cfg:       Returned OPR configuration
23307 + * @qry:       Returned OPR query
23308 + *
23309 + * Return:     '0' on success, error code otherwise
23310 + */
23311 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23312 +                  struct opr_cfg *cfg, struct opr_qry *qry)
23313 +{
23314 +       struct mc_command cmd = { 0 };
23315 +       struct dpseci_cmd_opr *cmd_params;
23316 +       struct dpseci_rsp_get_opr *rsp_params;
23317 +       int err;
23318 +
23319 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
23320 +                                         cmd_flags,
23321 +                                         token);
23322 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23323 +       cmd_params->index = index;
23324 +       err = mc_send_command(mc_io, &cmd);
23325 +       if (err)
23326 +               return err;
23327 +
23328 +       rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
23329 +       qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
23330 +       qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
23331 +       cfg->oloe = rsp_params->oloe;
23332 +       cfg->oeane = rsp_params->oeane;
23333 +       cfg->olws = rsp_params->olws;
23334 +       cfg->oa = rsp_params->oa;
23335 +       cfg->oprrws = rsp_params->oprrws;
23336 +       qry->nesn = le16_to_cpu(rsp_params->nesn);
23337 +       qry->ndsn = le16_to_cpu(rsp_params->ndsn);
23338 +       qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
23339 +       qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
23340 +       qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
23341 +       qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
23342 +       qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
23343 +       qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
23344 +       qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
23345 +       qry->opr_id = le16_to_cpu(rsp_params->opr_id);
23346 +
23347 +       return 0;
23348 +}
23349 +
23350 +/**
23351 + * dpseci_set_congestion_notification() - Set congestion group
23352 + *     notification configuration
23353 + * @mc_io:     Pointer to MC portal's I/O object
23354 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23355 + * @token:     Token of DPSECI object
23356 + * @cfg:       congestion notification configuration
23357 + *
23358 + * Return:     '0' on success, error code otherwise
23359 + */
23360 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23361 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg)
23362 +{
23363 +       struct mc_command cmd = { 0 };
23364 +       struct dpseci_cmd_congestion_notification *cmd_params;
23365 +
23366 +       cmd.header = mc_encode_cmd_header(
23367 +                       DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
23368 +                       cmd_flags,
23369 +                       token);
23370 +       cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23371 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23372 +       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
23373 +       cmd_params->priority = cfg->dest_cfg.priority;
23374 +       dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
23375 +                        cfg->dest_cfg.dest_type);
23376 +       dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
23377 +       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
23378 +       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
23379 +       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
23380 +       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
23381 +
23382 +       return mc_send_command(mc_io, &cmd);
23383 +}
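A sketch of enabling a write-to-memory congestion notification; the DPSECI_CONGESTION_UNIT_* and DPSECI_CGN_MODE_* names are assumed from the matching dpseci.h declarations, and the thresholds are illustrative:

    static int example_enable_cgn(struct fsl_mc_io *mc_io, u16 token,
                                  dma_addr_t msg_iova)
    {
            struct dpseci_congestion_notification_cfg cfg = {
                    .units = DPSECI_CONGESTION_UNIT_FRAMES,   /* assumed */
                    .threshold_entry = 1024,
                    .threshold_exit = 512,
                    .message_iova = msg_iova,  /* DMA-able CSCN write area */
                    .notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER,
            };

            return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
    }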
23384 +
23385 +/**
23386 + * dpseci_get_congestion_notification() - Get congestion group notification
23387 + *     configuration
23388 + * @mc_io:     Pointer to MC portal's I/O object
23389 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23390 + * @token:     Token of DPSECI object
23391 + * @cfg:       congestion notification configuration
23392 + *
23393 + * Return:     '0' on success, error code otherwise
23394 + */
23395 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23396 +       u16 token, struct dpseci_congestion_notification_cfg *cfg)
23397 +{
23398 +       struct mc_command cmd = { 0 };
23399 +       struct dpseci_cmd_congestion_notification *rsp_params;
23400 +       int err;
23401 +
23402 +       cmd.header = mc_encode_cmd_header(
23403 +                       DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
23404 +                       cmd_flags,
23405 +                       token);
23406 +       err = mc_send_command(mc_io, &cmd);
23407 +       if (err)
23408 +               return err;
23409 +
23410 +       rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23411 +       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
23412 +       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
23413 +       cfg->dest_cfg.priority = rsp_params->priority;
23414 +       cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
23415 +                                                  CGN_DEST_TYPE);
23416 +       cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
23417 +       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
23418 +       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
23419 +       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
23420 +       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
23421 +
23422 +       return 0;
23423 +}
23424 --- /dev/null
23425 +++ b/drivers/crypto/caam/dpseci.h
23426 @@ -0,0 +1,395 @@
23427 +/*
23428 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23429 + * Copyright 2017 NXP
23430 + *
23431 + * Redistribution and use in source and binary forms, with or without
23432 + * modification, are permitted provided that the following conditions are met:
23433 + *     * Redistributions of source code must retain the above copyright
23434 + *      notice, this list of conditions and the following disclaimer.
23435 + *     * Redistributions in binary form must reproduce the above copyright
23436 + *      notice, this list of conditions and the following disclaimer in the
23437 + *      documentation and/or other materials provided with the distribution.
23438 + *     * Neither the names of the above-listed copyright holders nor the
23439 + *      names of any contributors may be used to endorse or promote products
23440 + *      derived from this software without specific prior written permission.
23441 + *
23442 + *
23443 + * ALTERNATIVELY, this software may be distributed under the terms of the
23444 + * GNU General Public License ("GPL") as published by the Free Software
23445 + * Foundation, either version 2 of that License or (at your option) any
23446 + * later version.
23447 + *
23448 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23449 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23450 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23451 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23452 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23453 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23454 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23455 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23456 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23457 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23458 + * POSSIBILITY OF SUCH DAMAGE.
23459 + */
23460 +#ifndef _DPSECI_H_
23461 +#define _DPSECI_H_
23462 +
23463 +/*
23464 + * Data Path SEC Interface API
23465 + * Contains initialization APIs and runtime control APIs for DPSECI
23466 + */
23467 +
23468 +struct fsl_mc_io;
23469 +struct opr_cfg;
23470 +struct opr_qry;
23471 +
23472 +/**
23473 + * General DPSECI macros
23474 + */
23475 +
23476 +/**
23477 + * Maximum number of Tx/Rx priorities per DPSECI object
23478 + */
23479 +#define DPSECI_PRIO_NUM                8
23480 +
23481 +/**
23482 + * All queues considered; see dpseci_set_rx_queue()
23483 + */
23484 +#define DPSECI_ALL_QUEUES      (u8)(-1)
23485 +
23486 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
23487 +               u16 *token);
23488 +
23489 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23490 +
23491 +/**
23492 + * Enable the Congestion Group support
23493 + */
23494 +#define DPSECI_OPT_HAS_CG              0x000020
23495 +
23496 +/**
23497 + * Enable the Order Restoration support
23498 + */
23499 +#define DPSECI_OPT_HAS_OPR             0x000040
23500 +
23501 +/**
23502 + * Order Point Records are shared for the entire DPSECI
23503 + */
23504 +#define DPSECI_OPT_OPR_SHARED          0x000080
23505 +
23506 +/**
23507 + * struct dpseci_cfg - Structure representing DPSECI configuration
23508 + * @options: Any combination of the following options:
23509 + *             DPSECI_OPT_HAS_CG
23510 + *             DPSECI_OPT_HAS_OPR
23511 + *             DPSECI_OPT_OPR_SHARED
23512 + * @num_tx_queues: number of queues towards the SEC
23513 + * @num_rx_queues: number of queues back from the SEC
23514 + * @priorities: Priorities for the SEC hardware processing;
23515 + *             each entry in the array is the priority of the tx queue
23516 + *             towards the SEC;
23517 + *             valid priorities are configured with values 1-8
23518 + */
23519 +struct dpseci_cfg {
23520 +       u32 options;
23521 +       u8 num_tx_queues;
23522 +       u8 num_rx_queues;
23523 +       u8 priorities[DPSECI_PRIO_NUM];
23524 +};
23525 +
23526 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23527 +                 const struct dpseci_cfg *cfg, u32 *obj_id);
23528 +
23529 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23530 +                  u32 object_id);
23531 +
23532 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23533 +
23534 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23535 +
23536 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23537 +                     int *en);
23538 +
23539 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23540 +
23541 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23542 +                         u8 irq_index, u8 *en);
23543 +
23544 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23545 +                         u8 irq_index, u8 en);
23546 +
23547 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23548 +                       u8 irq_index, u32 *mask);
23549 +
23550 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23551 +                       u8 irq_index, u32 mask);
23552 +
23553 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23554 +                         u8 irq_index, u32 *status);
23555 +
23556 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23557 +                           u8 irq_index, u32 status);
23558 +
23559 +/**
23560 + * struct dpseci_attr - Structure representing DPSECI attributes
23561 + * @id: DPSECI object ID
23562 + * @num_tx_queues: number of queues towards the SEC
23563 + * @num_rx_queues: number of queues back from the SEC
23564 + * @options: any combination of the following options:
23565 + *             DPSECI_OPT_HAS_CG
23566 + *             DPSECI_OPT_HAS_OPR
23567 + *             DPSECI_OPT_OPR_SHARED
23568 + */
23569 +struct dpseci_attr {
23570 +       int id;
23571 +       u8 num_tx_queues;
23572 +       u8 num_rx_queues;
23573 +       u32 options;
23574 +};
23575 +
23576 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23577 +                         struct dpseci_attr *attr);
23578 +
23579 +/**
23580 + * enum dpseci_dest - DPSECI destination types
23581 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
23582 + *     and does not generate FQDAN notifications; user is expected to dequeue
23583 + *     from the queue based on polling or other user-defined method
23584 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
23585 + *     notifications to the specified DPIO; user is expected to dequeue from
23586 + *     the queue only after notification is received
23587 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
23588 + *     FQDAN notifications, but is connected to the specified DPCON object;
23589 + *     user is expected to dequeue from the DPCON channel
23590 + */
23591 +enum dpseci_dest {
23592 +       DPSECI_DEST_NONE = 0,
23593 +       DPSECI_DEST_DPIO,
23594 +       DPSECI_DEST_DPCON
23595 +};
23596 +
23597 +/**
23598 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
23599 + * @dest_type: Destination type
23600 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
23601 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
23602 + *     are 0-1 or 0-7, depending on the number of priorities in that channel;
23603 + *     not relevant for 'DPSECI_DEST_NONE' option
23604 + */
23605 +struct dpseci_dest_cfg {
23606 +       enum dpseci_dest dest_type;
23607 +       int dest_id;
23608 +       u8 priority;
23609 +};
23610 +
23611 +/**
23612 + * DPSECI queue modification options
23613 + */
23614 +
23615 +/**
23616 + * Select to modify the user's context associated with the queue
23617 + */
23618 +#define DPSECI_QUEUE_OPT_USER_CTX              0x00000001
23619 +
23620 +/**
23621 + * Select to modify the queue's destination
23622 + */
23623 +#define DPSECI_QUEUE_OPT_DEST                  0x00000002
23624 +
23625 +/**
23626 + * Select to modify the queue's order preservation
23627 + */
23628 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION    0x00000004
23629 +
23630 +/**
23631 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
23632 + * @options: Flags representing the suggested modifications to the queue;
23633 + *     Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
23634 + * @order_preservation_en: order preservation configuration for the Rx queue;
23635 + *     valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is set in 'options'
23636 + * @user_ctx: User context value provided in the frame descriptor of each
23637 + *     dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
23638 + *     in 'options'
23639 + * @dest_cfg: Queue destination parameters; valid only if
23640 + *     'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
23641 + */
23642 +struct dpseci_rx_queue_cfg {
23643 +       u32 options;
23644 +       int order_preservation_en;
23645 +       u64 user_ctx;
23646 +       struct dpseci_dest_cfg dest_cfg;
23647 +};
23648 +
23649 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23650 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg);
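/*
 * Usage sketch (informal, error handling elided): pointing Rx queue 0 at a
 * DPIO so that dequeued frames generate FQDAN notifications. 'dpio_id' and
 * 'my_ctx' are placeholders for values obtained by the caller.
 *
 *	struct dpseci_rx_queue_cfg rx_cfg = {
 *		.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
 *		.user_ctx = (u64)(uintptr_t)my_ctx,
 *		.dest_cfg = {
 *			.dest_type = DPSECI_DEST_DPIO,
 *			.dest_id = dpio_id,
 *			.priority = 0,
 *		},
 *	};
 *
 *	err = dpseci_set_rx_queue(mc_io, 0, token, 0, &rx_cfg);
 */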
23651 +
23652 +/**
23653 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
23654 + * @user_ctx: User context value provided in the frame descriptor of each
23655 + *     dequeued frame
23656 + * @order_preservation_en: Status of the order preservation configuration on the
23657 + *     queue
23658 + * @dest_cfg: Queue destination configuration
23659 + * @fqid: Virtual FQID value to be used for dequeue operations
23660 + */
23661 +struct dpseci_rx_queue_attr {
23662 +       u64 user_ctx;
23663 +       int order_preservation_en;
23664 +       struct dpseci_dest_cfg dest_cfg;
23665 +       u32 fqid;
23666 +};
23667 +
23668 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23669 +                       u8 queue, struct dpseci_rx_queue_attr *attr);
23670 +
23671 +/**
23672 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
23673 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
23674 + * @priority: SEC hardware processing priority for the queue
23675 + */
23676 +struct dpseci_tx_queue_attr {
23677 +       u32 fqid;
23678 +       u8 priority;
23679 +};
23680 +
23681 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23682 +                       u8 queue, struct dpseci_tx_queue_attr *attr);
23683 +
23684 +/**
23685 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
23686 + *     hardware accelerator
23687 + * @ip_id: ID for SEC
23688 + * @major_rev: Major revision number for SEC
23689 + * @minor_rev: Minor revision number for SEC
23690 + * @era: SEC Era
23691 + * @deco_num: The number of copies of the DECO that are implemented in this
23692 + *     version of SEC
23693 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
23694 + *     version of SEC
23695 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
23696 + *     version of SEC
23697 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
23698 + *     implemented in this version of SEC
23699 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
23700 + *     implemented in this version of SEC
23701 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
23702 + *     this version of SEC
23703 + * @pk_acc_num:  The number of copies of the Public Key module that are
23704 + *     implemented in this version of SEC
23705 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
23706 + *     implemented in this version of SEC
23707 + * @rng_acc_num: The number of copies of the Random Number Generator that are
23708 + *     implemented in this version of SEC
23709 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
23710 + *     implemented in this version of SEC
23711 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
23712 + *     in this version of SEC
23713 + * @des_acc_num: The number of copies of the DES module that are implemented in
23714 + *     this version of SEC
23715 + * @aes_acc_num: The number of copies of the AES module that are implemented in
23716 + *     this version of SEC
23717 + */
23718 +struct dpseci_sec_attr {
23719 +       u16 ip_id;
23720 +       u8 major_rev;
23721 +       u8 minor_rev;
23722 +       u8 era;
23723 +       u8 deco_num;
23724 +       u8 zuc_auth_acc_num;
23725 +       u8 zuc_enc_acc_num;
23726 +       u8 snow_f8_acc_num;
23727 +       u8 snow_f9_acc_num;
23728 +       u8 crc_acc_num;
23729 +       u8 pk_acc_num;
23730 +       u8 kasumi_acc_num;
23731 +       u8 rng_acc_num;
23732 +       u8 md_acc_num;
23733 +       u8 arc4_acc_num;
23734 +       u8 des_acc_num;
23735 +       u8 aes_acc_num;
23736 +};
23737 +
23738 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23739 +                       struct dpseci_sec_attr *attr);
23740 +
23741 +/**
23742 + * struct dpseci_sec_counters - Structure representing the global SEC counters
23743 + *                             (not per-DPSECI counters)
23744 + * @dequeued_requests: Number of Requests Dequeued
23745 + * @ob_enc_requests:   Number of Outbound Encrypt Requests
23746 + * @ib_dec_requests:   Number of Inbound Decrypt Requests
23747 + * @ob_enc_bytes:      Number of Outbound Bytes Encrypted
23748 + * @ob_prot_bytes:     Number of Outbound Bytes Protected
23749 + * @ib_dec_bytes:      Number of Inbound Bytes Decrypted
23750 + * @ib_valid_bytes:    Number of Inbound Bytes Validated
23751 + */
23752 +struct dpseci_sec_counters {
23753 +       u64 dequeued_requests;
23754 +       u64 ob_enc_requests;
23755 +       u64 ib_dec_requests;
23756 +       u64 ob_enc_bytes;
23757 +       u64 ob_prot_bytes;
23758 +       u64 ib_dec_bytes;
23759 +       u64 ib_valid_bytes;
23760 +};
23761 +
23762 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23763 +                           struct dpseci_sec_counters *counters);
23764 +
23765 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23766 +                          u16 *major_ver, u16 *minor_ver);
23767 +
23768 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23769 +                  u8 options, struct opr_cfg *cfg);
23770 +
23771 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23772 +                  struct opr_cfg *cfg, struct opr_qry *qry);
23773 +
23774 +/**
23775 + * enum dpseci_congestion_unit - DPSECI congestion units
23776 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
23777 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
23778 + */
23779 +enum dpseci_congestion_unit {
23780 +       DPSECI_CONGESTION_UNIT_BYTES = 0,
23781 +       DPSECI_CONGESTION_UNIT_FRAMES
23782 +};
23783 +
23784 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER             0x00000001
23785 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT              0x00000002
23786 +#define DPSECI_CGN_MODE_COHERENT_WRITE                 0x00000004
23787 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER           0x00000008
23788 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT            0x00000010
23789 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED       0x00000020
23790 +
23791 +/**
23792 + * struct dpseci_congestion_notification_cfg - congestion notification
23793 + *     configuration
23794 + * @units: units type
23795 + * @threshold_entry: above this threshold we enter a congestion state;
23796 + *     set it to '0' to disable congestion notifications
23797 + * @threshold_exit: below this threshold we exit the congestion state
23798 + * @message_ctx: The context that will be part of the CSCN message
23799 + * @message_iova: I/O virtual address (must be in DMA-able memory
23800 + *     and 16B aligned)
23801 + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
23802 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
23803 + *     values
23804 + */
23805 +struct dpseci_congestion_notification_cfg {
23806 +       enum dpseci_congestion_unit units;
23807 +       u32 threshold_entry;
23808 +       u32 threshold_exit;
23809 +       u64 message_ctx;
23810 +       u64 message_iova;
23811 +       struct dpseci_dest_cfg dest_cfg;
23812 +       u16 notification_mode;
23813 +};
23814 +
23815 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23816 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg);
23817 +
23818 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23819 +       u16 token, struct dpseci_congestion_notification_cfg *cfg);
23820 +
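/*
 * Typical object lifecycle (a minimal sketch, assuming a valid mc_io handle
 * and an existing DPSECI object with id 'dpseci_id'; error paths elided):
 *
 *	u16 token;
 *	struct dpseci_attr attr;
 *	int err;
 *
 *	err = dpseci_open(mc_io, 0, dpseci_id, &token);
 *	err = dpseci_get_attributes(mc_io, 0, token, &attr);
 *	// configure Rx queues here via dpseci_set_rx_queue()
 *	err = dpseci_enable(mc_io, 0, token);
 *	// runtime traffic flows through QBMan frame queues, not this API
 *	err = dpseci_disable(mc_io, 0, token);
 *	err = dpseci_close(mc_io, 0, token);
 */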
23821 +#endif /* _DPSECI_H_ */
23822 --- /dev/null
23823 +++ b/drivers/crypto/caam/dpseci_cmd.h
23824 @@ -0,0 +1,261 @@
23825 +/*
23826 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23827 + * Copyright 2017 NXP
23828 + *
23829 + * Redistribution and use in source and binary forms, with or without
23830 + * modification, are permitted provided that the following conditions are met:
23831 + *     * Redistributions of source code must retain the above copyright
23832 + *      notice, this list of conditions and the following disclaimer.
23833 + *     * Redistributions in binary form must reproduce the above copyright
23834 + *      notice, this list of conditions and the following disclaimer in the
23835 + *      documentation and/or other materials provided with the distribution.
23836 + *     * Neither the names of the above-listed copyright holders nor the
23837 + *      names of any contributors may be used to endorse or promote products
23838 + *      derived from this software without specific prior written permission.
23839 + *
23840 + *
23841 + * ALTERNATIVELY, this software may be distributed under the terms of the
23842 + * GNU General Public License ("GPL") as published by the Free Software
23843 + * Foundation, either version 2 of that License or (at your option) any
23844 + * later version.
23845 + *
23846 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23847 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23848 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23849 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23850 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23851 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23852 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23853 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23854 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23855 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23856 + * POSSIBILITY OF SUCH DAMAGE.
23857 + */
23858 +
23859 +#ifndef _DPSECI_CMD_H_
23860 +#define _DPSECI_CMD_H_
23861 +
23862 +/* DPSECI Version */
23863 +#define DPSECI_VER_MAJOR                               5
23864 +#define DPSECI_VER_MINOR                               1
23865 +
23866 +#define DPSECI_VER(maj, min)   (((maj) << 16) | (min))
23867 +#define DPSECI_VERSION         DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
23868 +
23869 +/* Command IDs */
23870 +
23871 +#define DPSECI_CMDID_CLOSE                              0x8001
23872 +#define DPSECI_CMDID_OPEN                               0x8091
23873 +#define DPSECI_CMDID_CREATE                             0x9092
23874 +#define DPSECI_CMDID_DESTROY                            0x9891
23875 +#define DPSECI_CMDID_GET_API_VERSION                    0xa091
23876 +
23877 +#define DPSECI_CMDID_ENABLE                             0x0021
23878 +#define DPSECI_CMDID_DISABLE                            0x0031
23879 +#define DPSECI_CMDID_GET_ATTR                           0x0041
23880 +#define DPSECI_CMDID_RESET                              0x0051
23881 +#define DPSECI_CMDID_IS_ENABLED                         0x0061
23882 +
23883 +#define DPSECI_CMDID_SET_IRQ_ENABLE                     0x0121
23884 +#define DPSECI_CMDID_GET_IRQ_ENABLE                     0x0131
23885 +#define DPSECI_CMDID_SET_IRQ_MASK                       0x0141
23886 +#define DPSECI_CMDID_GET_IRQ_MASK                       0x0151
23887 +#define DPSECI_CMDID_GET_IRQ_STATUS                     0x0161
23888 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS                   0x0171
23889 +
23890 +#define DPSECI_CMDID_SET_RX_QUEUE                       0x1941
23891 +#define DPSECI_CMDID_GET_RX_QUEUE                       0x1961
23892 +#define DPSECI_CMDID_GET_TX_QUEUE                       0x1971
23893 +#define DPSECI_CMDID_GET_SEC_ATTR                       0x1981
23894 +#define DPSECI_CMDID_GET_SEC_COUNTERS                   0x1991
23895 +#define DPSECI_CMDID_SET_OPR                           0x19A1
23896 +#define DPSECI_CMDID_GET_OPR                           0x19B1
23897 +
23898 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION       0x1701
23899 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION       0x1711
23900 +
23901 +/* Macros for accessing command fields smaller than 1 byte */
23902 +#define DPSECI_MASK(field)     \
23903 +       GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1,     \
23904 +               DPSECI_##field##_SHIFT)
23905 +
23906 +#define dpseci_set_field(var, field, val)      \
23907 +       ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
23908 +
23909 +#define dpseci_get_field(var, field)   \
23910 +       (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
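/*
 * Illustrative expansion: with DPSECI_CGN_UNITS_SHIFT = 4 and
 * DPSECI_CGN_UNITS_SIZE = 2 (defined later in this file),
 *
 *	dpseci_set_field(options, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES);
 *
 * becomes options |= ((1 << 4) & GENMASK(5, 4)), i.e. it sets bits 5:4
 * without disturbing the CGN_DEST_TYPE field in bits 3:0.
 */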
23911 +
23912 +struct dpseci_cmd_open {
23913 +       __le32 dpseci_id;
23914 +};
23915 +
23916 +struct dpseci_cmd_create {
23917 +       u8 priorities[8];
23918 +       u8 num_tx_queues;
23919 +       u8 num_rx_queues;
23920 +       __le16 pad;
23921 +       __le32 options;
23922 +};
23923 +
23924 +struct dpseci_cmd_destroy {
23925 +       __le32 object_id;
23926 +};
23927 +
23928 +struct dpseci_rsp_is_enabled {
23929 +       __le32 is_enabled;
23930 +};
23931 +
23932 +struct dpseci_cmd_irq_enable {
23933 +       u8 enable_state;
23934 +       u8 pad[3];
23935 +       u8 irq_index;
23936 +};
23937 +
23938 +struct dpseci_rsp_get_irq_enable {
23939 +       u8 enable_state;
23940 +};
23941 +
23942 +struct dpseci_cmd_irq_mask {
23943 +       __le32 mask;
23944 +       u8 irq_index;
23945 +};
23946 +
23947 +struct dpseci_cmd_irq_status {
23948 +       __le32 status;
23949 +       u8 irq_index;
23950 +};
23951 +
23952 +struct dpseci_rsp_get_attributes {
23953 +       __le32 id;
23954 +       __le32 pad0;
23955 +       u8 num_tx_queues;
23956 +       u8 num_rx_queues;
23957 +       u8 pad1[6];
23958 +       __le32 options;
23959 +};
23960 +
23961 +struct dpseci_cmd_queue {
23962 +       __le32 dest_id;
23963 +       u8 priority;
23964 +       u8 queue;
23965 +       u8 dest_type;
23966 +       u8 pad;
23967 +       __le64 user_ctx;
23968 +       union {
23969 +               __le32 options;
23970 +               __le32 fqid;
23971 +       };
23972 +       __le32 order_preservation_en;
23973 +};
23974 +
23975 +struct dpseci_rsp_get_tx_queue {
23976 +       __le32 pad;
23977 +       __le32 fqid;
23978 +       u8 priority;
23979 +};
23980 +
23981 +struct dpseci_rsp_get_sec_attr {
23982 +       __le16 ip_id;
23983 +       u8 major_rev;
23984 +       u8 minor_rev;
23985 +       u8 era;
23986 +       u8 pad0[3];
23987 +       u8 deco_num;
23988 +       u8 zuc_auth_acc_num;
23989 +       u8 zuc_enc_acc_num;
23990 +       u8 pad1;
23991 +       u8 snow_f8_acc_num;
23992 +       u8 snow_f9_acc_num;
23993 +       u8 crc_acc_num;
23994 +       u8 pad2;
23995 +       u8 pk_acc_num;
23996 +       u8 kasumi_acc_num;
23997 +       u8 rng_acc_num;
23998 +       u8 pad3;
23999 +       u8 md_acc_num;
24000 +       u8 arc4_acc_num;
24001 +       u8 des_acc_num;
24002 +       u8 aes_acc_num;
24003 +};
24004 +
24005 +struct dpseci_rsp_get_sec_counters {
24006 +       __le64 dequeued_requests;
24007 +       __le64 ob_enc_requests;
24008 +       __le64 ib_dec_requests;
24009 +       __le64 ob_enc_bytes;
24010 +       __le64 ob_prot_bytes;
24011 +       __le64 ib_dec_bytes;
24012 +       __le64 ib_valid_bytes;
24013 +};
24014 +
24015 +struct dpseci_rsp_get_api_version {
24016 +       __le16 major;
24017 +       __le16 minor;
24018 +};
24019 +
24020 +struct dpseci_cmd_opr {
24021 +       __le16 pad;
24022 +       u8 index;
24023 +       u8 options;
24024 +       u8 pad1[7];
24025 +       u8 oloe;
24026 +       u8 oeane;
24027 +       u8 olws;
24028 +       u8 oa;
24029 +       u8 oprrws;
24030 +};
24031 +
24032 +#define DPSECI_OPR_RIP_SHIFT           0
24033 +#define DPSECI_OPR_RIP_SIZE            1
24034 +#define DPSECI_OPR_ENABLE_SHIFT                1
24035 +#define DPSECI_OPR_ENABLE_SIZE         1
24036 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT     1
24037 +#define DPSECI_OPR_TSEQ_NLIS_SIZE      1
24038 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT     1
24039 +#define DPSECI_OPR_HSEQ_NLIS_SIZE      1
24040 +
24041 +struct dpseci_rsp_get_opr {
24042 +       __le64 pad;
24043 +       u8 rip_enable;
24044 +       u8 pad0[2];
24045 +       u8 oloe;
24046 +       u8 oeane;
24047 +       u8 olws;
24048 +       u8 oa;
24049 +       u8 oprrws;
24050 +       __le16 nesn;
24051 +       __le16 pad1;
24052 +       __le16 ndsn;
24053 +       __le16 pad2;
24054 +       __le16 ea_tseq;
24055 +       u8 tseq_nlis;
24056 +       u8 pad3;
24057 +       __le16 ea_hseq;
24058 +       u8 hseq_nlis;
24059 +       u8 pad4;
24060 +       __le16 ea_hptr;
24061 +       __le16 pad5;
24062 +       __le16 ea_tptr;
24063 +       __le16 pad6;
24064 +       __le16 opr_vid;
24065 +       __le16 pad7;
24066 +       __le16 opr_id;
24067 +};
24068 +
24069 +#define DPSECI_CGN_DEST_TYPE_SHIFT     0
24070 +#define DPSECI_CGN_DEST_TYPE_SIZE      4
24071 +#define DPSECI_CGN_UNITS_SHIFT         4
24072 +#define DPSECI_CGN_UNITS_SIZE          2
24073 +
24074 +struct dpseci_cmd_congestion_notification {
24075 +       __le32 dest_id;
24076 +       __le16 notification_mode;
24077 +       u8 priority;
24078 +       u8 options;
24079 +       __le64 message_iova;
24080 +       __le64 message_ctx;
24081 +       __le32 threshold_entry;
24082 +       __le32 threshold_exit;
24083 +};
24084 +
24085 +#endif /* _DPSECI_CMD_H_ */
24086 --- a/drivers/crypto/caam/error.c
24087 +++ b/drivers/crypto/caam/error.c
24088 @@ -6,11 +6,54 @@
24089  
24090  #include "compat.h"
24091  #include "regs.h"
24092 -#include "intern.h"
24093  #include "desc.h"
24094 -#include "jr.h"
24095  #include "error.h"
24096  
24097 +#ifdef DEBUG
24098 +
24099 +#include <linux/highmem.h>
24100 +
24101 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24102 +                 int rowsize, int groupsize, struct scatterlist *sg,
24103 +                 size_t tlen, bool ascii)
24104 +{
24105 +       struct scatterlist *it;
24106 +       void *it_page;
24107 +       size_t len;
24108 +       void *buf;
24109 +
24110 +       for (it = sg; it && tlen > 0; it = sg_next(it)) {
24111 +               /*
24112 +                * make sure the scatterlist's page
24113 +                * has a valid virtual memory mapping
24114 +                */
24115 +               it_page = kmap_atomic(sg_page(it));
24116 +               if (unlikely(!it_page)) {
24117 +                       pr_err("caam_dump_sg: kmap failed\n");
24118 +                       return;
24119 +               }
24120 +
24121 +               buf = it_page + it->offset;
24122 +               len = min_t(size_t, tlen, it->length);
24123 +               print_hex_dump(level, prefix_str, prefix_type, rowsize,
24124 +                              groupsize, buf, len, ascii);
24125 +               tlen -= len;
24126 +
24127 +               kunmap_atomic(it_page);
24128 +       }
24129 +}
24130 +
24131 +#else
24132 +
24133 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24134 +                 int rowsize, int groupsize, struct scatterlist *sg,
24135 +                 size_t tlen, bool ascii)
24136 +{}
24137 +
24138 +#endif
24139 +
24140 +EXPORT_SYMBOL(caam_dump_sg);
24141 +
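/*
 * Usage sketch (the hex dump is only produced in DEBUG builds; the #else
 * stub above is a no-op otherwise). From a caller that holds a request
 * 'req' with 'nbytes' of payload in its source scatterlist:
 *
 *	caam_dump_sg(KERN_ERR, "src@" __stringify(__LINE__)": ",
 *		     DUMP_PREFIX_ADDRESS, 16, 4, req->src, nbytes, false);
 */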
24142  static const struct {
24143         u8 value;
24144         const char *error_text;
24145 @@ -69,6 +112,54 @@ static const struct {
24146         { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
24147  };
24148  
24149 +static const struct {
24150 +       u8 value;
24151 +       const char *error_text;
24152 +} qi_error_list[] = {
24153 +       { 0x1F, "Job terminated by FQ or ICID flush" },
24154 +       { 0x20, "FD format error"},
24155 +       { 0x21, "FD command format error"},
24156 +       { 0x23, "FL format error"},
24157 +       { 0x25, "CRJD specified in FD, but not enabled in FLC"},
24158 +       { 0x30, "Max. buffer size too small"},
24159 +       { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
24160 +       { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
24161 +       { 0x33, "Size over/underflow (allocate mode)"},
24162 +       { 0x34, "Size over/underflow (reuse mode)"},
24163 +       { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
24164 +       { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
24165 +       { 0x41, "SBC frame format not supported (allocate mode)"},
24166 +       { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
24167 +       { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
24168 +       { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
24169 +       { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
24170 +       { 0x46, "Annotation length exceeds offset (reuse mode)"},
24171 +       { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
24172 +       { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
24173 +       { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
24174 +       { 0x51, "Unsupported IF reuse mode"},
24175 +       { 0x52, "Unsupported FL use mode"},
24176 +       { 0x53, "Unsupported RJD use mode"},
24177 +       { 0x54, "Unsupported inline descriptor use mode"},
24178 +       { 0xC0, "Table buffer pool 0 depletion"},
24179 +       { 0xC1, "Table buffer pool 1 depletion"},
24180 +       { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
24181 +       { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
24182 +       { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
24183 +       { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
24184 +       { 0xD0, "FLC read error"},
24185 +       { 0xD1, "FL read error"},
24186 +       { 0xD2, "FL write error"},
24187 +       { 0xD3, "OF SGT write error"},
24188 +       { 0xD4, "PTA read error"},
24189 +       { 0xD5, "PTA write error"},
24190 +       { 0xD6, "OF SGT F-bit write error"},
24191 +       { 0xD7, "ASA write error"},
24192 +       { 0xE1, "FLC[ICR]=0 ICID error"},
24193 +       { 0xE2, "FLC[ICR]=1 ICID error"},
24194 +       { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
24195 +};
24196 +
24197  static const char * const cha_id_list[] = {
24198         "",
24199         "AES",
24200 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
24201             strlen(rng_err_id_list[err_id])) {
24202                 /* RNG-only error */
24203                 err_str = rng_err_id_list[err_id];
24204 -       } else if (err_id < ARRAY_SIZE(err_id_list))
24205 +       } else {
24206                 err_str = err_id_list[err_id];
24207 -       else
24208 -               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24209 +       }
24210  
24211         /*
24212          * CCB ICV check failures are part of normal operation life;
24213 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
24214                 status, error, idx_str, idx, err_str, err_err_code);
24215  }
24216  
24217 +static void report_qi_status(struct device *qidev, const u32 status,
24218 +                            const char *error)
24219 +{
24220 +       u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
24221 +       const char *err_str = "unidentified error value 0x";
24222 +       char err_err_code[3] = { 0 };
24223 +       int i;
24224 +
24225 +       for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
24226 +               if (qi_error_list[i].value == err_id)
24227 +                       break;
24228 +
24229 +       if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
24230 +               err_str = qi_error_list[i].error_text;
24231 +       else
24232 +               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24233 +
24234 +       dev_err(qidev, "%08x: %s: %s%s\n",
24235 +               status, error, err_str, err_err_code);
24236 +}
24237 +
24238  static void report_jr_status(struct device *jrdev, const u32 status,
24239                              const char *error)
24240  {
24241 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
24242                 status, error, __func__);
24243  }
24244  
24245 -void caam_jr_strstatus(struct device *jrdev, u32 status)
24246 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
24247  {
24248         static const struct stat_src {
24249                 void (*report_ssed)(struct device *jrdev, const u32 status,
24250 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
24251                 { report_ccb_status, "CCB" },
24252                 { report_jump_status, "Jump" },
24253                 { report_deco_status, "DECO" },
24254 -               { NULL, "Queue Manager Interface" },
24255 +               { report_qi_status, "Queue Manager Interface" },
24256                 { report_jr_status, "Job Ring" },
24257                 { report_cond_code_status, "Condition Code" },
24258                 { NULL, NULL },
24259 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
24260         else
24261                 dev_err(jrdev, "%d: unknown error source\n", ssrc);
24262  }
24263 -EXPORT_SYMBOL(caam_jr_strstatus);
24264 +EXPORT_SYMBOL(caam_strstatus);
24265 --- a/drivers/crypto/caam/error.h
24266 +++ b/drivers/crypto/caam/error.h
24267 @@ -7,5 +7,13 @@
24268  #ifndef CAAM_ERROR_H
24269  #define CAAM_ERROR_H
24270  #define CAAM_ERROR_STR_MAX 302
24271 -void caam_jr_strstatus(struct device *jrdev, u32 status);
24272 +
24273 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
24274 +
24275 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
24276 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
24277 +
24278 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24279 +                 int rowsize, int groupsize, struct scatterlist *sg,
24280 +                 size_t tlen, bool ascii);
24281  #endif /* CAAM_ERROR_H */
24282 --- a/drivers/crypto/caam/intern.h
24283 +++ b/drivers/crypto/caam/intern.h
24284 @@ -64,10 +64,9 @@ struct caam_drv_private_jr {
24285   * Driver-private storage for a single CAAM block instance
24286   */
24287  struct caam_drv_private {
24288 -
24289 -       struct device *dev;
24290 -       struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
24291 -       struct platform_device *pdev;
24292 +#ifdef CONFIG_CAAM_QI
24293 +       struct device *qidev;
24294 +#endif
24295  
24296         /* Physical-presence section */
24297         struct caam_ctrl __iomem *ctrl; /* controller region */
24298 @@ -103,11 +102,6 @@ struct caam_drv_private {
24299  #ifdef CONFIG_DEBUG_FS
24300         struct dentry *dfs_root;
24301         struct dentry *ctl; /* controller dir */
24302 -       struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
24303 -       struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
24304 -       struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
24305 -       struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
24306 -
24307         struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
24308         struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
24309  #endif
24310 @@ -115,4 +109,22 @@ struct caam_drv_private {
24311  
24312  void caam_jr_algapi_init(struct device *dev);
24313  void caam_jr_algapi_remove(struct device *dev);
24314 +
24315 +#ifdef CONFIG_DEBUG_FS
24316 +static int caam_debugfs_u64_get(void *data, u64 *val)
24317 +{
24318 +       *val = caam64_to_cpu(*(u64 *)data);
24319 +       return 0;
24320 +}
24321 +
24322 +static int caam_debugfs_u32_get(void *data, u64 *val)
24323 +{
24324 +       *val = caam32_to_cpu(*(u32 *)data);
24325 +       return 0;
24326 +}
24327 +
24328 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24329 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24330 +#endif
24331 +
24332  #endif /* INTERN_H */
24333 --- a/drivers/crypto/caam/jr.c
24334 +++ b/drivers/crypto/caam/jr.c
24335 @@ -9,6 +9,7 @@
24336  #include <linux/of_address.h>
24337  
24338  #include "compat.h"
24339 +#include "ctrl.h"
24340  #include "regs.h"
24341  #include "jr.h"
24342  #include "desc.h"
24343 @@ -22,6 +23,14 @@ struct jr_driver_data {
24344  
24345  static struct jr_driver_data driver_data;
24346  
24347 +static int jr_driver_probed;
24348 +
24349 +int caam_jr_driver_probed(void)
24350 +{
24351 +       return jr_driver_probed;
24352 +}
24353 +EXPORT_SYMBOL(caam_jr_driver_probed);
24354 +
24355  static int caam_reset_hw_jr(struct device *dev)
24356  {
24357         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24358 @@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
24359                 dev_err(jrdev, "Failed to shut down job ring\n");
24360         irq_dispose_mapping(jrpriv->irq);
24361  
24362 +       jr_driver_probed--;
24363 +
24364         return ret;
24365  }
24366  
24367 @@ -280,6 +291,36 @@ struct device *caam_jr_alloc(void)
24368  EXPORT_SYMBOL(caam_jr_alloc);
24369  
24370  /**
24371 + * caam_jridx_alloc() - Allocate a specific Job Ring based on its index.
24372 + *
24373 + * Return: pointer to the Job Ring device for the given index if successful,
24374 + *        or ERR_PTR(-ENODEV) if no matching ring has been probed.
24375 + */
24376 +struct device *caam_jridx_alloc(int idx)
24377 +{
24378 +       struct caam_drv_private_jr *jrpriv;
24379 +       struct device *dev = ERR_PTR(-ENODEV);
24380 +
24381 +       spin_lock(&driver_data.jr_alloc_lock);
24382 +
24383 +       if (list_empty(&driver_data.jr_list))
24384 +               goto end;
24385 +
24386 +       list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
24387 +               if (jrpriv->ridx == idx) {
24388 +                       atomic_inc(&jrpriv->tfm_count);
24389 +                       dev = jrpriv->dev;
24390 +                       break;
24391 +               }
24392 +       }
24393 +
24394 +end:
24395 +       spin_unlock(&driver_data.jr_alloc_lock);
24396 +       return dev;
24397 +}
24398 +EXPORT_SYMBOL(caam_jridx_alloc);
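/*
 * Usage sketch: a caller that must target a fixed ring balances a
 * successful allocation with caam_jr_free(); 'desc', 'cbk' and 'ctx' are
 * placeholders for a caller-built descriptor and completion context.
 *
 *	struct device *jrdev = caam_jridx_alloc(0);
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);
 *	caam_jr_enqueue(jrdev, desc, cbk, ctx);
 *	...
 *	caam_jr_free(jrdev);
 */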
24399 +
24400 +/**
24401   * caam_jr_free() - Free the Job Ring
24402   * @rdev     - points to the dev that identifies the Job ring to
24403   *             be released.
24404 @@ -496,15 +537,28 @@ static int caam_jr_probe(struct platform
24405                 return -ENOMEM;
24406         }
24407  
24408 -       jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
24409 +       jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
24410  
24411 -       if (sizeof(dma_addr_t) == sizeof(u64))
24412 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
24413 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
24414 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
24415 +               if (caam_dpaa2)
24416 +                       error = dma_set_mask_and_coherent(jrdev,
24417 +                                                         DMA_BIT_MASK(49));
24418 +               else if (of_device_is_compatible(nprop,
24419 +                                                "fsl,sec-v5.0-job-ring"))
24420 +                       error = dma_set_mask_and_coherent(jrdev,
24421 +                                                         DMA_BIT_MASK(40));
24422                 else
24423 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
24424 -       else
24425 -               dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24426 +                       error = dma_set_mask_and_coherent(jrdev,
24427 +                                                         DMA_BIT_MASK(36));
24428 +       } else {
24429 +               error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24430 +       }
24431 +       if (error) {
24432 +               dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
24433 +                       error);
24434 +               iounmap(ctrl);
24435 +               return error;
24436 +       }
24437  
24438         /* Identify the interrupt */
24439         jrpriv->irq = irq_of_parse_and_map(nprop, 0);
24440 @@ -524,10 +578,12 @@ static int caam_jr_probe(struct platform
24441  
24442         atomic_set(&jrpriv->tfm_count, 0);
24443  
24444 +       jr_driver_probed++;
24445 +
24446         return 0;
24447  }
24448  
24449 -static struct of_device_id caam_jr_match[] = {
24450 +static const struct of_device_id caam_jr_match[] = {
24451         {
24452                 .compatible = "fsl,sec-v4.0-job-ring",
24453         },
24454 --- a/drivers/crypto/caam/jr.h
24455 +++ b/drivers/crypto/caam/jr.h
24456 @@ -8,7 +8,9 @@
24457  #define JR_H
24458  
24459  /* Prototypes for backend-level services exposed to APIs */
24460 +int caam_jr_driver_probed(void);
24461  struct device *caam_jr_alloc(void);
24462 +struct device *caam_jridx_alloc(int idx);
24463  void caam_jr_free(struct device *rdev);
24464  int caam_jr_enqueue(struct device *dev, u32 *desc,
24465                     void (*cbk)(struct device *dev, u32 *desc, u32 status,
24466 --- a/drivers/crypto/caam/key_gen.c
24467 +++ b/drivers/crypto/caam/key_gen.c
24468 @@ -41,15 +41,29 @@ Split key generation--------------------
24469  [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
24470                         @0xffe04000
24471  */
24472 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24473 -                 int split_key_pad_len, const u8 *key_in, u32 keylen,
24474 -                 u32 alg_op)
24475 +int gen_split_key(struct device *jrdev, u8 *key_out,
24476 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
24477 +                 int max_keylen)
24478  {
24479         u32 *desc;
24480         struct split_key_result result;
24481         dma_addr_t dma_addr_in, dma_addr_out;
24482         int ret = -ENOMEM;
24483  
24484 +       adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
24485 +       adata->keylen_pad = split_key_pad_len(adata->algtype &
24486 +                                             OP_ALG_ALGSEL_MASK);
24487 +
24488 +#ifdef DEBUG
24489 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
24490 +               adata->keylen, adata->keylen_pad);
24491 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
24492 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
24493 +#endif
24494 +
24495 +       if (adata->keylen_pad > max_keylen)
24496 +               return -EINVAL;
24497 +
24498         desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
24499         if (!desc) {
24500                 dev_err(jrdev, "unable to allocate key input memory\n");
24501 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
24502                 goto out_free;
24503         }
24504  
24505 -       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
24506 +       dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
24507                                       DMA_FROM_DEVICE);
24508         if (dma_mapping_error(jrdev, dma_addr_out)) {
24509                 dev_err(jrdev, "unable to map key output memory\n");
24510 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
24511         append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
24512  
24513         /* Sets MDHA up into an HMAC-INIT */
24514 -       append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
24515 +       append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
24516 +                        OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
24517 +                        OP_ALG_AS_INIT);
24518  
24519         /*
24520          * do a FIFO_LOAD of zero, this will trigger the internal key expansion
24521 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
24522          * FIFO_STORE with the explicit split-key content store
24523          * (0x26 output type)
24524          */
24525 -       append_fifo_store(desc, dma_addr_out, split_key_len,
24526 +       append_fifo_store(desc, dma_addr_out, adata->keylen,
24527                           LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
24528  
24529  #ifdef DEBUG
24530 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
24531  #ifdef DEBUG
24532                 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
24533                                DUMP_PREFIX_ADDRESS, 16, 4, key_out,
24534 -                              split_key_pad_len, 1);
24535 +                              adata->keylen_pad, 1);
24536  #endif
24537         }
24538  
24539 -       dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
24540 +       dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
24541                          DMA_FROM_DEVICE);
24542  out_unmap_in:
24543         dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
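/*
 * Caller-side sketch (hypothetical caller, mirroring how the aead/ahash
 * setkey paths use this helper): 'adata' carries the algorithm selection in
 * and the derived split-key lengths out; 'max_keylen' bounds 'key_out'
 * (e.g. CAAM_MAX_HASH_KEY_SIZE in the hashing driver).
 *
 *	struct alginfo adata = {
 *		.algtype = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
 *	};
 *
 *	err = gen_split_key(jrdev, key_out, &adata, key_in, keylen,
 *			    max_keylen);
 *	// on success, adata.keylen / adata.keylen_pad describe key_out
 */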
24544 --- a/drivers/crypto/caam/key_gen.h
24545 +++ b/drivers/crypto/caam/key_gen.h
24546 @@ -5,6 +5,36 @@
24547   *
24548   */
24549  
24550 +/**
24551 + * split_key_len - Compute MDHA split key length for a given algorithm
24552 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24553 + *        SHA224, SHA256, SHA384, SHA512.
24554 + *
24555 + * Return: MDHA split key length
24556 + */
24557 +static inline u32 split_key_len(u32 hash)
24558 +{
24559 +       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
24560 +       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
24561 +       u32 idx;
24562 +
24563 +       idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
24564 +
24565 +       return (u32)(mdpadlen[idx] * 2);
24566 +}
24567 +
24568 +/**
24569 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
24570 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24571 + *        SHA224, SHA256, SHA384, SHA512.
24572 + *
24573 + * Return: MDHA split key pad length
24574 + */
24575 +static inline u32 split_key_pad_len(u32 hash)
24576 +{
24577 +       return ALIGN(split_key_len(hash), 16);
24578 +}
24579 +
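/*
 * Worked example: for OP_ALG_ALGSEL_SHA256 the MDHA pad size is 32 bytes,
 * so split_key_len() returns 64 and split_key_pad_len() returns
 * ALIGN(64, 16) = 64; for MD5 the pad size is 16, giving 32 and 32.
 */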
24580  struct split_key_result {
24581         struct completion completion;
24582         int err;
24583 @@ -12,6 +42,6 @@ struct split_key_result {
24584  
24585  void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
24586  
24587 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24588 -                   int split_key_pad_len, const u8 *key_in, u32 keylen,
24589 -                   u32 alg_op);
24590 +int gen_split_key(struct device *jrdev, u8 *key_out,
24591 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
24592 +                 int max_keylen);
24593 --- a/drivers/crypto/caam/pdb.h
24594 +++ b/drivers/crypto/caam/pdb.h
24595 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
24596  #define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
24597  #define RSA_PDB_D_SHIFT         12
24598  #define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
24599 +#define RSA_PDB_Q_SHIFT         12
24600 +#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
24601  
24602  #define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
24603  #define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
24604 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
24605  #define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
24606  
24607  #define RSA_PRIV_KEY_FRM_1      0
24608 +#define RSA_PRIV_KEY_FRM_2      1
24609 +#define RSA_PRIV_KEY_FRM_3      2
24610  
24611  /**
24612   * RSA Encrypt Protocol Data Block
24613 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
24614         dma_addr_t      d_dma;
24615  } __packed;
24616  
24617 +/**
24618 + * RSA Decrypt PDB - Private Key Form #2
24619 + * @sgf     : scatter-gather field
24620 + * @g_dma   : dma address of encrypted input data
24621 + * @f_dma   : dma address of output data
24622 + * @d_dma   : dma address of RSA private exponent
24623 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
24624 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
24625 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24626 + *            as internal state buffer. It is assumed to be as long as p.
24627 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24628 + *            as internal state buffer. It is assumed to be as long as q.
24629 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24630 + */
24631 +struct rsa_priv_f2_pdb {
24632 +       u32             sgf;
24633 +       dma_addr_t      g_dma;
24634 +       dma_addr_t      f_dma;
24635 +       dma_addr_t      d_dma;
24636 +       dma_addr_t      p_dma;
24637 +       dma_addr_t      q_dma;
24638 +       dma_addr_t      tmp1_dma;
24639 +       dma_addr_t      tmp2_dma;
24640 +       u32             p_q_len;
24641 +} __packed;
24642 +
24643 +/**
24644 + * RSA Decrypt PDB - Private Key Form #3
24645 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
24646 + * the RSA modulus.
24647 + * @sgf     : scatter-gather field
24648 + * @g_dma   : dma address of encrypted input data
24649 + * @f_dma   : dma address of output data
24650 + * @c_dma   : dma address of RSA CRT coefficient
24651 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
24652 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
24653 + * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
24654 + * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
24655 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24656 + *            as internal state buffer. It is assumed to be as long as p.
24657 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24658 + *            as internal state buffer. It is assumed to be as long as q.
24659 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24660 + */
24661 +struct rsa_priv_f3_pdb {
24662 +       u32             sgf;
24663 +       dma_addr_t      g_dma;
24664 +       dma_addr_t      f_dma;
24665 +       dma_addr_t      c_dma;
24666 +       dma_addr_t      p_dma;
24667 +       dma_addr_t      q_dma;
24668 +       dma_addr_t      dp_dma;
24669 +       dma_addr_t      dq_dma;
24670 +       dma_addr_t      tmp1_dma;
24671 +       dma_addr_t      tmp2_dma;
24672 +       u32             p_q_len;
24673 +} __packed;
24674 +
24675  #endif
24676 --- a/drivers/crypto/caam/pkc_desc.c
24677 +++ b/drivers/crypto/caam/pkc_desc.c
24678 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
24679         append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24680                          RSA_PRIV_KEY_FRM_1);
24681  }
24682 +
24683 +/* Descriptor for RSA Private operation - Private Key Form #2 */
24684 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
24685 +{
24686 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
24687 +       append_cmd(desc, pdb->sgf);
24688 +       append_ptr(desc, pdb->g_dma);
24689 +       append_ptr(desc, pdb->f_dma);
24690 +       append_ptr(desc, pdb->d_dma);
24691 +       append_ptr(desc, pdb->p_dma);
24692 +       append_ptr(desc, pdb->q_dma);
24693 +       append_ptr(desc, pdb->tmp1_dma);
24694 +       append_ptr(desc, pdb->tmp2_dma);
24695 +       append_cmd(desc, pdb->p_q_len);
24696 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24697 +                        RSA_PRIV_KEY_FRM_2);
24698 +}
24699 +
24700 +/* Descriptor for RSA Private operation - Private Key Form #3 */
24701 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
24702 +{
24703 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
24704 +       append_cmd(desc, pdb->sgf);
24705 +       append_ptr(desc, pdb->g_dma);
24706 +       append_ptr(desc, pdb->f_dma);
24707 +       append_ptr(desc, pdb->c_dma);
24708 +       append_ptr(desc, pdb->p_dma);
24709 +       append_ptr(desc, pdb->q_dma);
24710 +       append_ptr(desc, pdb->dp_dma);
24711 +       append_ptr(desc, pdb->dq_dma);
24712 +       append_ptr(desc, pdb->tmp1_dma);
24713 +       append_ptr(desc, pdb->tmp2_dma);
24714 +       append_cmd(desc, pdb->p_q_len);
24715 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24716 +                        RSA_PRIV_KEY_FRM_3);
24717 +}
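/*
 * Caller-side sketch (informal; descriptor buffer allocation, DMA mapping
 * of the PDB pointers and error handling elided; 'rsa_done' names a
 * hypothetical job-ring completion callback):
 *
 *	init_rsa_priv_f3_desc(desc, &pdb);
 *	ret = caam_jr_enqueue(jrdev, desc, rsa_done, req);
 */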
24718 --- /dev/null
24719 +++ b/drivers/crypto/caam/qi.c
24720 @@ -0,0 +1,797 @@
24721 +/*
24722 + * CAAM/SEC 4.x QI transport/backend driver
24723 + * Queue Interface backend functionality
24724 + *
24725 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
24726 + * Copyright 2016-2017 NXP
24727 + */
24728 +
24729 +#include <linux/cpumask.h>
24730 +#include <linux/kthread.h>
24731 +#include <linux/fsl_qman.h>
24732 +
24733 +#include "regs.h"
24734 +#include "qi.h"
24735 +#include "desc.h"
24736 +#include "intern.h"
24737 +#include "desc_constr.h"
24738 +
24739 +#define PREHDR_RSLS_SHIFT      31
24740 +
24741 +/*
24742 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
24743 + * so that resources used by the in-flight buffers do not become a memory hog.
24744 + */
24745 +#define MAX_RSP_FQ_BACKLOG_PER_CPU     256
24746 +
24747 +#define CAAM_QI_ENQUEUE_RETRIES        10000
24748 +
24749 +#define CAAM_NAPI_WEIGHT       63
24750 +
24751 +/*
24752 + * caam_napi - struct holding CAAM NAPI-related params
24753 + * @irqtask: IRQ task for QI backend
24754 + * @p: QMan portal
24755 + */
24756 +struct caam_napi {
24757 +       struct napi_struct irqtask;
24758 +       struct qman_portal *p;
24759 +};
24760 +
24761 +/*
24762 + * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
24763 + *                     pending responses expected on each cpu.
24764 + * @caam_napi: CAAM NAPI params
24765 + * @net_dev: netdev used by NAPI
24766 + * @rsp_fq: response FQ from CAAM
24767 + */
24768 +struct caam_qi_pcpu_priv {
24769 +       struct caam_napi caam_napi;
24770 +       struct net_device net_dev;
24771 +       struct qman_fq *rsp_fq;
24772 +} ____cacheline_aligned;
24773 +
24774 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
24775 +static DEFINE_PER_CPU(int, last_cpu);
24776 +
24777 +/*
24778 + * caam_qi_priv - CAAM QI backend private params
24779 + * @cgr: QMan congestion group
24780 + * @qi_pdev: platform device for QI backend
24781 + */
24782 +struct caam_qi_priv {
24783 +       struct qman_cgr cgr;
24784 +       struct platform_device *qi_pdev;
24785 +};
24786 +
24787 +static struct caam_qi_priv qipriv ____cacheline_aligned;
24788 +
24789 +/*
24790 + * This is written by only one core - the one that initialized the CGR - and
24791 + * read by multiple cores (all the others).
24792 + */
24793 +bool caam_congested __read_mostly;
24794 +EXPORT_SYMBOL(caam_congested);
24795 +
24796 +#ifdef CONFIG_DEBUG_FS
24797 +/*
24798 + * This is a counter for the number of times the congestion group (where all
24799 + * the request and response queues are) reached congestion. Incremented
24800 + * each time the congestion callback is called with congested == true.
24801 + */
24802 +static u64 times_congested;
24803 +#endif
24804 +
24805 +/*
24806 + * CPU on which the module was initialised. This is required because the QMan
24807 + * driver requires CGRs to be removed on the same CPU on which they were
24808 + * originally allocated.
24809 + */
24810 +static int mod_init_cpu;
24811 +
24812 +/*
24813 + * This is a cache of buffers, from which the users of the CAAM QI driver
24814 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
24815 + * doing malloc on the hotpath.
24816 + * NOTE: A more elegant solution would be to have some headroom in the frames
24817 + *       being processed. This could be added by the dpaa-ethernet driver.
24818 + *       This would pose a problem for userspace application processing which
24819 + *       cannot know of this limitation. So for now, this will work.
24820 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
24821 + */
24822 +static struct kmem_cache *qi_cache;
24823 +
24824 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
24825 +{
24826 +       struct qm_fd fd;
24827 +       int ret;
24828 +       int num_retries = 0;
24829 +
24830 +       fd.cmd = 0;
24831 +       fd.format = qm_fd_compound;
24832 +       fd.cong_weight = req->fd_sgt[1].length;
24833 +       fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
24834 +                             DMA_BIDIRECTIONAL);
24835 +       if (dma_mapping_error(qidev, fd.addr)) {
24836 +               dev_err(qidev, "DMA mapping error for QI enqueue request\n");
24837 +               return -EIO;
24838 +       }
24839 +
24840 +       do {
24841 +               ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
24842 +               if (likely(!ret))
24843 +                       return 0;
24844 +
24845 +               if (ret != -EBUSY)
24846 +                       break;
24847 +               num_retries++;
24848 +       } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
24849 +
24850 +       dev_err(qidev, "qman_enqueue failed: %d\n", ret);
24851 +
24852 +       return ret;
24853 +}
24854 +EXPORT_SYMBOL(caam_qi_enqueue);
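/*
 * Caller-side sketch (informal): users of the QI backend prepare a compound
 * frame in req->fd_sgt[] and set a completion callback before enqueueing;
 * 'my_done' is a hypothetical callback of the form
 * void (*)(struct caam_drv_req *, u32), as invoked by caam_fq_ern_cb() below.
 *
 *	req->cbk = my_done;
 *	req->drv_ctx = drv_ctx;
 *	ret = caam_qi_enqueue(qidev, req);
 *	if (ret)
 *		// the frame was not enqueued; the caller still owns req
 *		handle_error(ret);
 */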
24855 +
24856 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
24857 +                          const struct qm_mr_entry *msg)
24858 +{
24859 +       const struct qm_fd *fd;
24860 +       struct caam_drv_req *drv_req;
24861 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
24862 +
24863 +       fd = &msg->ern.fd;
24864 +
24865 +       if (fd->format != qm_fd_compound) {
24866 +               dev_err(qidev, "Non-compound FD from CAAM\n");
24867 +               return;
24868 +       }
24869 +
24870 +       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
24871 +       if (!drv_req) {
24872 +               dev_err(qidev,
24873 +                       "Can't find original request for CAAM response\n");
24874 +               return;
24875 +       }
24876 +
24877 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
24878 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
24879 +
24880 +       drv_req->cbk(drv_req, -EIO);
24881 +}
24882 +
24883 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
24884 +                                         struct qman_fq *rsp_fq,
24885 +                                         dma_addr_t hwdesc,
24886 +                                         int fq_sched_flag)
24887 +{
24888 +       int ret;
24889 +       struct qman_fq *req_fq;
24890 +       struct qm_mcc_initfq opts;
24891 +
24892 +       req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
24893 +       if (!req_fq)
24894 +               return ERR_PTR(-ENOMEM);
24895 +
24896 +       req_fq->cb.ern = caam_fq_ern_cb;
24897 +       req_fq->cb.fqs = NULL;
24898 +
24899 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
24900 +                               QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
24901 +                            req_fq);
24902 +       if (ret) {
24903 +               dev_err(qidev, "Failed to create session req FQ\n");
24904 +               goto create_req_fq_fail;
24905 +       }
24906 +
24907 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
24908 +                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
24909 +                      QM_INITFQ_WE_CGID;
24910 +       opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
24911 +       opts.fqd.dest.channel = qm_channel_caam;
24912 +       opts.fqd.dest.wq = 2;
24913 +       opts.fqd.context_b = qman_fq_fqid(rsp_fq);
24914 +       opts.fqd.context_a.hi = upper_32_bits(hwdesc);
24915 +       opts.fqd.context_a.lo = lower_32_bits(hwdesc);
24916 +       opts.fqd.cgid = qipriv.cgr.cgrid;
24917 +
24918 +       ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
24919 +       if (ret) {
24920 +               dev_err(qidev, "Failed to init session req FQ\n");
24921 +               goto init_req_fq_fail;
24922 +       }
24923 +
24924 +       dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
24925 +               smp_processor_id());
24926 +       return req_fq;
24927 +
24928 +init_req_fq_fail:
24929 +       qman_destroy_fq(req_fq, 0);
24930 +create_req_fq_fail:
24931 +       kfree(req_fq);
24932 +       return ERR_PTR(ret);
24933 +}
24934 +
24935 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
24936 +{
24937 +       int ret;
24938 +
24939 +       ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
24940 +                                   QMAN_VOLATILE_FLAG_FINISH,
24941 +                                   QM_VDQCR_PRECEDENCE_VDQCR |
24942 +                                   QM_VDQCR_NUMFRAMES_TILLEMPTY);
24943 +       if (ret) {
24944 +               dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
24945 +               return ret;
24946 +       }
24947 +
24948 +       do {
24949 +               struct qman_portal *p;
24950 +
24951 +               p = qman_get_affine_portal(smp_processor_id());
24952 +               qman_p_poll_dqrr(p, 16);
24953 +       } while (fq->flags & QMAN_FQ_STATE_NE);
24954 +
24955 +       return 0;
24956 +}
24957 +
24958 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
24959 +{
24960 +       u32 flags;
24961 +       int ret;
24962 +
24963 +       ret = qman_retire_fq(fq, &flags);
24964 +       if (ret < 0) {
24965 +               dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
24966 +               return ret;
24967 +       }
24968 +
24969 +       if (!ret)
24970 +               goto empty_fq;
24971 +
24972 +       /* Async FQ retirement condition */
24973 +       if (ret == 1) {
24974 +               /* Retry till FQ gets in retired state */
24975 +               do {
24976 +                       msleep(20);
24977 +               } while (fq->state != qman_fq_state_retired);
24978 +
24979 +               WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
24980 +               WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
24981 +       }
24982 +
24983 +empty_fq:
24984 +       if (fq->flags & QMAN_FQ_STATE_NE) {
24985 +               ret = empty_retired_fq(qidev, fq);
24986 +               if (ret) {
24987 +                       dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
24988 +                               fq->fqid);
24989 +                       return ret;
24990 +               }
24991 +       }
24992 +
24993 +       ret = qman_oos_fq(fq);
24994 +       if (ret)
24995 +               dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
24996 +
24997 +       qman_destroy_fq(fq, 0);
24998 +       kfree(fq);
24999 +
25000 +       return ret;
25001 +}
25002 +
25003 +static int empty_caam_fq(struct qman_fq *fq)
25004 +{
25005 +       int ret;
25006 +       struct qm_mcr_queryfq_np np;
25007 +
25008 +       /* Wait till the older CAAM FQ gets empty */
25009 +       do {
25010 +               ret = qman_query_fq_np(fq, &np);
25011 +               if (ret)
25012 +                       return ret;
25013 +
25014 +               if (!np.frm_cnt)
25015 +                       break;
25016 +
25017 +               msleep(20);
25018 +       } while (1);
25019 +
25020 +       /*
25021 +        * Give extra time for pending jobs from this FQ in holding tanks
25022 +        * to get processed
25023 +        */
25024 +       msleep(20);
25025 +       return 0;
25026 +}
25027 +
25028 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
25029 +{
25030 +       int ret;
25031 +       u32 num_words;
25032 +       struct qman_fq *new_fq, *old_fq;
25033 +       struct device *qidev = drv_ctx->qidev;
25034 +
25035 +       num_words = desc_len(sh_desc);
25036 +       if (num_words > MAX_SDLEN) {
25037 +               dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
25038 +               return -EINVAL;
25039 +       }
25040 +
25041 +       /* Note down older req FQ */
25042 +       old_fq = drv_ctx->req_fq;
25043 +
25044 +       /* Create a new req FQ in parked state */
25045 +       new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
25046 +                                   drv_ctx->context_a, 0);
25047 +       if (unlikely(IS_ERR_OR_NULL(new_fq))) {
25048 +               dev_err(qidev, "FQ allocation for shdesc update failed\n");
25049 +               return PTR_ERR(new_fq);
25050 +       }
25051 +
25052 +       /* Hook up new FQ to context so that new requests keep queuing */
25053 +       drv_ctx->req_fq = new_fq;
25054 +
25055 +       /* Empty and remove the older FQ */
25056 +       ret = empty_caam_fq(old_fq);
25057 +       if (ret) {
25058 +               dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
25059 +
25060 +               /* We can revert to older FQ */
25061 +               drv_ctx->req_fq = old_fq;
25062 +
25063 +               if (kill_fq(qidev, new_fq))
25064 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
25065 +
25066 +               return ret;
25067 +       }
25068 +
25069 +       /*
25070 +        * Re-initialise pre-header. Set RSLS and SDLEN.
25071 +        * Update the shared descriptor for driver context.
25072 +        */
25073 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25074 +                                          num_words);
25075 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25076 +       dma_sync_single_for_device(qidev, drv_ctx->context_a,
25077 +                                  sizeof(drv_ctx->sh_desc) +
25078 +                                  sizeof(drv_ctx->prehdr),
25079 +                                  DMA_BIDIRECTIONAL);
25080 +
25081 +       /* Put the new FQ in scheduled state */
25082 +       ret = qman_schedule_fq(new_fq);
25083 +       if (ret) {
25084 +               dev_err(qidev, "Failed to schedule new CAAM FQ, ecode = %d\n", ret);
25085 +
25086 +               /*
25087 +                * We can kill the new FQ and revert to the old FQ.
25088 +                * Since the desc is already modified, this is a success case.
25089 +                */
25090 +
25091 +               drv_ctx->req_fq = old_fq;
25092 +
25093 +               if (kill_fq(qidev, new_fq))
25094 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
25095 +       } else if (kill_fq(qidev, old_fq)) {
25096 +               dev_warn(qidev, "Old CAAM FQ kill failed\n");
25097 +       }
25098 +
25099 +       return 0;
25100 +}
25101 +EXPORT_SYMBOL(caam_drv_ctx_update);
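
The update path above parks a new FQ, redirects the context, drains the old FQ, rewrites the preheader and shared descriptor, and only then schedules the new FQ, so a caller only hands in the rebuilt descriptor. A sketch of a typical call site (names illustrative, e.g. from a setkey handler):

static int my_rekey(struct device *qidev, struct caam_drv_ctx *drv_ctx,
		    u32 *new_sh_desc)
{
	int ret = caam_drv_ctx_update(drv_ctx, new_sh_desc);

	if (ret)
		dev_err(qidev, "shared descriptor update failed: %d\n", ret);
	return ret;
}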
25102 +
25103 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
25104 +                                      int *cpu,
25105 +                                      u32 *sh_desc)
25106 +{
25107 +       size_t size;
25108 +       u32 num_words;
25109 +       dma_addr_t hwdesc;
25110 +       struct caam_drv_ctx *drv_ctx;
25111 +       const cpumask_t *cpus = qman_affine_cpus();
25112 +
25113 +       num_words = desc_len(sh_desc);
25114 +       if (num_words > MAX_SDLEN) {
25115 +               dev_err(qidev, "Invalid descriptor len: %d words\n",
25116 +                       num_words);
25117 +               return ERR_PTR(-EINVAL);
25118 +       }
25119 +
25120 +       drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
25121 +       if (!drv_ctx)
25122 +               return ERR_PTR(-ENOMEM);
25123 +
25124 +       /*
25125 +        * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
25126 +        * and dma-map them.
25127 +        */
25128 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25129 +                                          num_words);
25130 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25131 +       size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
25132 +       hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
25133 +                               DMA_BIDIRECTIONAL);
25134 +       if (dma_mapping_error(qidev, hwdesc)) {
25135 +               dev_err(qidev, "DMA map error for preheader + shdesc\n");
25136 +               kfree(drv_ctx);
25137 +               return ERR_PTR(-ENOMEM);
25138 +       }
25139 +       drv_ctx->context_a = hwdesc;
25140 +
25141 +       /* If given CPU does not own the portal, choose another one that does */
25142 +       if (!cpumask_test_cpu(*cpu, cpus)) {
25143 +               int *pcpu = &get_cpu_var(last_cpu);
25144 +
25145 +               *pcpu = cpumask_next(*pcpu, cpus);
25146 +               if (*pcpu >= nr_cpu_ids)
25147 +                       *pcpu = cpumask_first(cpus);
25148 +               *cpu = *pcpu;
25149 +
25150 +               put_cpu_var(last_cpu);
25151 +       }
25152 +       drv_ctx->cpu = *cpu;
25153 +
25154 +       /* Find response FQ hooked with this CPU */
25155 +       drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
25156 +
25157 +       /* Attach request FQ */
25158 +       drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
25159 +                                            QMAN_INITFQ_FLAG_SCHED);
25160 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
25161 +               dev_err(qidev, "create_caam_req_fq failed\n");
25162 +               dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
25163 +               kfree(drv_ctx);
25164 +               return ERR_PTR(-ENOMEM);
25165 +       }
25166 +
25167 +       drv_ctx->qidev = qidev;
25168 +       return drv_ctx;
25169 +}
25170 +EXPORT_SYMBOL(caam_drv_ctx_init);
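
A sketch of the intended call pattern (names illustrative); note that the CPU is passed by reference and may be rewritten to a portal-owning CPU:

static struct caam_drv_ctx *my_attach_ctx(struct device *qidev, u32 *sh_desc)
{
	/* preferred response CPU; the backend may pick another one */
	int cpu = raw_smp_processor_id();
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return drv_ctx;

	/* completions for this context will now arrive on 'cpu' */
	return drv_ctx;
}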
25171 +
25172 +void *qi_cache_alloc(gfp_t flags)
25173 +{
25174 +       return kmem_cache_alloc(qi_cache, flags);
25175 +}
25176 +EXPORT_SYMBOL(qi_cache_alloc);
25177 +
25178 +void qi_cache_free(void *obj)
25179 +{
25180 +       kmem_cache_free(qi_cache, obj);
25181 +}
25182 +EXPORT_SYMBOL(qi_cache_free);
25183 +
25184 +static int caam_qi_poll(struct napi_struct *napi, int budget)
25185 +{
25186 +       struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
25187 +
25188 +       int cleaned = qman_p_poll_dqrr(np->p, budget);
25189 +
25190 +       if (cleaned < budget) {
25191 +               napi_complete(napi);
25192 +               qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
25193 +       }
25194 +
25195 +       return cleaned;
25196 +}
25197 +
25198 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
25199 +{
25200 +       if (IS_ERR_OR_NULL(drv_ctx))
25201 +               return;
25202 +
25203 +       /* Remove request FQ */
25204 +       if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
25205 +               dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
25206 +
25207 +       dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
25208 +                        sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
25209 +                        DMA_BIDIRECTIONAL);
25210 +       kfree(drv_ctx);
25211 +}
25212 +EXPORT_SYMBOL(caam_drv_ctx_rel);
25213 +
25214 +int caam_qi_shutdown(struct device *qidev)
25215 +{
25216 +       int i, ret;
25217 +       struct caam_qi_priv *priv = dev_get_drvdata(qidev);
25218 +       const cpumask_t *cpus = qman_affine_cpus();
25219 +       struct cpumask old_cpumask = current->cpus_allowed;
25220 +
25221 +       for_each_cpu(i, cpus) {
25222 +               struct napi_struct *irqtask;
25223 +
25224 +               irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
25225 +               napi_disable(irqtask);
25226 +               netif_napi_del(irqtask);
25227 +
25228 +               if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
25229 +                       dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
25230 +       }
25231 +
25232 +       /*
25233 +        * QMan driver requires CGRs to be deleted from same CPU from where they
25234 +        * were instantiated. Hence we run the module removal from the
25235 +        * same CPU on which the module was originally inserted.
25236 +        */
25237 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25238 +
25239 +       ret = qman_delete_cgr(&priv->cgr);
25240 +       if (ret)
25241 +               dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
25242 +       else
25243 +               qman_release_cgrid(priv->cgr.cgrid);
25244 +
25245 +       kmem_cache_destroy(qi_cache);
25246 +
25247 +       /* Now that we're done with the CGRs, restore the cpus allowed mask */
25248 +       set_cpus_allowed_ptr(current, &old_cpumask);
25249 +
25250 +       platform_device_unregister(priv->qi_pdev);
25251 +       return ret;
25252 +}
25253 +
25254 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
25255 +{
25256 +       caam_congested = congested;
25257 +
25258 +       if (congested) {
25259 +#ifdef CONFIG_DEBUG_FS
25260 +               times_congested++;
25261 +#endif
25262 +               pr_debug_ratelimited("CAAM entered congestion\n");
25263 +
25264 +       } else {
25265 +               pr_debug_ratelimited("CAAM exited congestion\n");
25266 +       }
25267 +}
25268 +
25269 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
25270 +{
25271 +       /*
25272 +        * In case of a threaded ISR, for RT kernels in_irq() does not return an
25273 +        * appropriate value, so use in_serving_softirq() to distinguish between
25274 +        * softirq and irq contexts.
25275 +        */
25276 +       if (unlikely(in_irq() || !in_serving_softirq())) {
25277 +               /* Disable QMan IRQ source and invoke NAPI */
25278 +               qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
25279 +               np->p = p;
25280 +               napi_schedule(&np->irqtask);
25281 +               return 1;
25282 +       }
25283 +       return 0;
25284 +}
25285 +
25286 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
25287 +                                                   struct qman_fq *rsp_fq,
25288 +                                                   const struct qm_dqrr_entry *dqrr)
25289 +{
25290 +       struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
25291 +       struct caam_drv_req *drv_req;
25292 +       const struct qm_fd *fd;
25293 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25294 +
25295 +       if (caam_qi_napi_schedule(p, caam_napi))
25296 +               return qman_cb_dqrr_stop;
25297 +
25298 +       fd = &dqrr->fd;
25299 +       if (unlikely(fd->status))
25300 +               dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
25301 +
25302 +       if (unlikely(fd->format != qm_fd_compound)) {
25303 +               dev_err(qidev, "Non-compound FD from CAAM\n");
25304 +               return qman_cb_dqrr_consume;
25305 +       }
25306 +
25307 +       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
25308 +       if (unlikely(!drv_req)) {
25309 +               dev_err(qidev,
25310 +                       "Can't find original request for caam response\n");
25311 +               return qman_cb_dqrr_consume;
25312 +       }
25313 +
25314 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25315 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25316 +
25317 +       drv_req->cbk(drv_req, fd->status);
25318 +       return qman_cb_dqrr_consume;
25319 +}
25320 +
25321 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
25322 +{
25323 +       struct qm_mcc_initfq opts;
25324 +       struct qman_fq *fq;
25325 +       int ret;
25326 +
25327 +       fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
25328 +       if (!fq)
25329 +               return -ENOMEM;
25330 +
25331 +       fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
25332 +
25333 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
25334 +                            QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
25335 +       if (ret) {
25336 +               dev_err(qidev, "Rsp FQ create failed\n");
25337 +               kfree(fq);
25338 +               return -ENODEV;
25339 +       }
25340 +
25341 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25342 +               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25343 +               QM_INITFQ_WE_CGID;
25344 +       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
25345 +                          QM_FQCTRL_CGE;
25346 +       opts.fqd.dest.channel = qman_affine_channel(cpu);
25347 +       opts.fqd.dest.wq = 3;
25348 +       opts.fqd.cgid = qipriv.cgr.cgrid;
25349 +       opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
25350 +                                               QM_STASHING_EXCL_DATA;
25351 +       opts.fqd.context_a.stashing.data_cl = 1;
25352 +       opts.fqd.context_a.stashing.context_cl = 1;
25353 +
25354 +       ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
25355 +       if (ret) {
25356 +               dev_err(qidev, "Rsp FQ init failed\n");
25357 +               kfree(fq);
25358 +               return -ENODEV;
25359 +       }
25360 +
25361 +       per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
25362 +
25363 +       dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
25364 +       return 0;
25365 +}
25366 +
25367 +static int init_cgr(struct device *qidev)
25368 +{
25369 +       int ret;
25370 +       struct qm_mcc_initcgr opts;
25371 +       const u64 cpus = *(u64 *)qman_affine_cpus();
25372 +       const int num_cpus = hweight64(cpus);
25373 +       const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
25374 +
25375 +       ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
25376 +       if (ret) {
25377 +               dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
25378 +               return ret;
25379 +       }
25380 +
25381 +       qipriv.cgr.cb = cgr_cb;
25382 +       memset(&opts, 0, sizeof(opts));
25383 +       opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
25384 +       opts.cgr.cscn_en = QM_CGR_EN;
25385 +       opts.cgr.mode = QMAN_CGR_MODE_FRAME;
25386 +       qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
25387 +
25388 +       ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
25389 +       if (ret) {
25390 +               dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
25391 +                       qipriv.cgr.cgrid);
25392 +               return ret;
25393 +       }
25394 +
25395 +       dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
25396 +       return 0;
25397 +}
25398 +
25399 +static int alloc_rsp_fqs(struct device *qidev)
25400 +{
25401 +       int ret, i;
25402 +       const cpumask_t *cpus = qman_affine_cpus();
25403 +
25404 +       /* Now create response FQs */
25405 +       for_each_cpu(i, cpus) {
25406 +               ret = alloc_rsp_fq_cpu(qidev, i);
25407 +               if (ret) {
25408 +                       dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
25409 +                       return ret;
25410 +               }
25411 +       }
25412 +
25413 +       return 0;
25414 +}
25415 +
25416 +static void free_rsp_fqs(void)
25417 +{
25418 +       int i;
25419 +       const cpumask_t *cpus = qman_affine_cpus();
25420 +
25421 +       for_each_cpu(i, cpus)
25422 +               kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
25423 +}
25424 +
25425 +int caam_qi_init(struct platform_device *caam_pdev)
25426 +{
25427 +       int err, i;
25428 +       struct platform_device *qi_pdev;
25429 +       struct device *ctrldev = &caam_pdev->dev, *qidev;
25430 +       struct caam_drv_private *ctrlpriv;
25431 +       const cpumask_t *cpus = qman_affine_cpus();
25432 +       struct cpumask old_cpumask = current->cpus_allowed;
25433 +       static struct platform_device_info qi_pdev_info = {
25434 +               .name = "caam_qi",
25435 +               .id = PLATFORM_DEVID_NONE
25436 +       };
25437 +
25438 +       /*
25439 +        * QMAN requires CGRs to be removed from the same CPU+portal where they
25440 +        * were originally allocated. Hence we need to note down the
25441 +        * initialisation CPU and use the same CPU for module exit.
25442 +        * We select the first CPU from the list of portal-owning CPUs.
25443 +        * Then we pin module init to this CPU.
25444 +        */
25445 +       mod_init_cpu = cpumask_first(cpus);
25446 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25447 +
25448 +       qi_pdev_info.parent = ctrldev;
25449 +       qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
25450 +       qi_pdev = platform_device_register_full(&qi_pdev_info);
25451 +       if (IS_ERR(qi_pdev))
25452 +               return PTR_ERR(qi_pdev);
25453 +       arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
25454 +
25455 +       ctrlpriv = dev_get_drvdata(ctrldev);
25456 +       qidev = &qi_pdev->dev;
25457 +
25458 +       qipriv.qi_pdev = qi_pdev;
25459 +       dev_set_drvdata(qidev, &qipriv);
25460 +
25461 +       /* Initialise the congestion detection */
25462 +       err = init_cgr(qidev);
25463 +       if (err) {
25464 +               dev_err(qidev, "CGR initialization failed: %d\n", err);
25465 +               platform_device_unregister(qi_pdev);
25466 +               return err;
25467 +       }
25468 +
25469 +       /* Initialise response FQs */
25470 +       err = alloc_rsp_fqs(qidev);
25471 +       if (err) {
25472 +               dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
25473 +               free_rsp_fqs();
25474 +               platform_device_unregister(qi_pdev);
25475 +               return err;
25476 +       }
25477 +
25478 +       /*
25479 +        * Enable the NAPI contexts on each core that has an affine
25480 +        * portal.
25481 +        */
25482 +       for_each_cpu(i, cpus) {
25483 +               struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
25484 +               struct caam_napi *caam_napi = &priv->caam_napi;
25485 +               struct napi_struct *irqtask = &caam_napi->irqtask;
25486 +               struct net_device *net_dev = &priv->net_dev;
25487 +
25488 +               net_dev->dev = *qidev;
25489 +               INIT_LIST_HEAD(&net_dev->napi_list);
25490 +
25491 +               netif_napi_add(net_dev, irqtask, caam_qi_poll,
25492 +                              CAAM_NAPI_WEIGHT);
25493 +
25494 +               napi_enable(irqtask);
25495 +       }
25496 +
25497 +       /* Hook up QI device to parent controlling caam device */
25498 +       ctrlpriv->qidev = qidev;
25499 +
25500 +       qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
25501 +                                    SLAB_CACHE_DMA, NULL);
25502 +       if (!qi_cache) {
25503 +               dev_err(qidev, "Can't allocate CAAM cache\n");
25504 +               free_rsp_fqs();
25505 +               platform_device_unregister(qi_pdev);
25506 +               return -ENOMEM;
25507 +       }
25508 +
25509 +       /* Done with the CGRs; restore the cpus allowed mask */
25510 +       set_cpus_allowed_ptr(current, &old_cpumask);
25511 +#ifdef CONFIG_DEBUG_FS
25512 +       debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
25513 +                           &times_congested, &caam_fops_u64_ro);
25514 +#endif
25515 +       dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
25516 +       return 0;
25517 +}
25518 --- /dev/null
25519 +++ b/drivers/crypto/caam/qi.h
25520 @@ -0,0 +1,204 @@
25521 +/*
25522 + * Public definitions for the CAAM/QI (Queue Interface) backend.
25523 + *
25524 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25525 + * Copyright 2016-2017 NXP
25526 + */
25527 +
25528 +#ifndef __QI_H__
25529 +#define __QI_H__
25530 +
25531 +#include <linux/fsl_qman.h>
25532 +#include "compat.h"
25533 +#include "desc.h"
25534 +#include "desc_constr.h"
25535 +
25536 +/*
25537 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
25538 + * (as pointed to by context_a of the to-CAAM FQ).
25539 + * When the job descriptor is executed by DECO, the whole job descriptor
25540 + * together with shared descriptor gets loaded in DECO buffer, which is
25541 + * 64 words (each 32-bit) long.
25542 + *
25543 + * The job descriptor constructed by CAAM hardware has the following layout:
25544 + *
25545 + *     HEADER          (1 word)
25546 + *     Shdesc ptr      (1 or 2 words)
25547 + *     SEQ_OUT_PTR     (1 word)
25548 + *     Out ptr         (1 or 2 words)
25549 + *     Out length      (1 word)
25550 + *     SEQ_IN_PTR      (1 word)
25551 + *     In ptr          (1 or 2 words)
25552 + *     In length       (1 word)
25553 + *
25554 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
25555 + *
25556 + * Apart from shdesc contents, the total number of words that get loaded in the
25557 + * DECO buffer is '8' or '11'. The remaining words in the DECO buffer can be
25558 + * used for storing the shared descriptor.
25559 + */
25560 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
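
As a worked example of the arithmetic above, assuming the 64-bit dma_addr_t case (CAAM_CMD_SZ, CAAM_PTR_SZ and DESC_JOB_IO_LEN come from desc.h and desc_constr.h elsewhere in this patch):

/*
 * CAAM_DESC_BYTES_MAX = 64 words * CAAM_CMD_SZ (4) = 256 bytes
 * DESC_JOB_IO_LEN     = 5 commands + 3 pointers = 5 * 4 + 3 * 8
 *                     = 44 bytes, i.e. the '11 words' case above
 * MAX_SDLEN           = (256 - 44) / 4 = 53 words for the shared descriptor
 */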
25561 +
25562 +/* Length of a single buffer in the QI driver memory cache */
25563 +#define CAAM_QI_MEMCACHE_SIZE  768
25564 +
25565 +extern bool caam_congested __read_mostly;
25566 +
25567 +/*
25568 + * This is the request structure the driver application should fill while
25569 + * submitting a job to the driver.
25570 + */
25571 +struct caam_drv_req;
25572 +
25573 +/*
25574 + * caam_qi_cbk - application's callback function invoked by the driver when the
25575 + *               request has been processed.
25576 + * @drv_req: original request that was submitted
25577 + * @status: completion status of request (0 - success, non-zero - error code)
25578 + */
25579 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
25580 +
25581 +enum optype {
25582 +       ENCRYPT,
25583 +       DECRYPT,
25584 +       GIVENCRYPT,
25585 +       NUM_OP
25586 +};
25587 +
25588 +/**
25589 + * caam_drv_ctx - CAAM/QI backend driver context
25590 + *
25591 + * The jobs are processed by the driver against a driver context.
25592 + * With every cryptographic context, a driver context is attached.
25593 + * The driver context contains data for private use by driver.
25594 + * For the applications, this is an opaque structure.
25595 + *
25596 + * @prehdr: preheader placed before the shared descriptor
25597 + * @sh_desc: shared descriptor
25598 + * @context_a: shared descriptor dma address
25599 + * @req_fq: to-CAAM request frame queue
25600 + * @rsp_fq: from-CAAM response frame queue
25601 + * @cpu: cpu on which to receive CAAM response
25602 + * @op_type: operation type
25603 + * @qidev: device pointer for CAAM/QI backend
25604 + */
25605 +struct caam_drv_ctx {
25606 +       u32 prehdr[2];
25607 +       u32 sh_desc[MAX_SDLEN];
25608 +       dma_addr_t context_a;
25609 +       struct qman_fq *req_fq;
25610 +       struct qman_fq *rsp_fq;
25611 +       int cpu;
25612 +       enum optype op_type;
25613 +       struct device *qidev;
25614 +} ____cacheline_aligned;
25615 +
25616 +/**
25617 + * caam_drv_req - The request structure the driver application should fill while
25618 + *                submitting a job to the driver.
25619 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
25620 + *          buffers.
25621 + * @cbk: callback function to invoke when job is completed
25622 + * @app_ctx: arbitrary context attached with request by the application
25623 + *
25624 + * The field mentioned below should not be used by the application.
25625 + * It is for private use by the driver.
25626 + *
25627 + * @drv_ctx: driver context with which this request was created and
25628 + *           against which it is submitted
25629 + */
25630 +struct caam_drv_req {
25631 +       struct qm_sg_entry fd_sgt[2];
25632 +       struct caam_drv_ctx *drv_ctx;
25633 +       caam_qi_cbk cbk;
25634 +       void *app_ctx;
25635 +} ____cacheline_aligned;
25636 +
25637 +/**
25638 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
25639 + *
25640 + * A CAAM/QI driver context must be attached with each cryptographic context.
25641 + * This function allocates memory for CAAM/QI context and returns a handle to
25642 + * the application. This handle must be submitted along with each enqueue
25643 + * request to the driver by the application.
25644 + *
25645 + * @cpu: CPU on which the application prefers the driver to receive CAAM
25646 + *       responses. The request completion callback will be issued from this
25647 + *       CPU.
25648 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
25649 + *           context.
25650 + *
25651 + * Returns a driver context on success or negative error code on failure.
25652 + */
25653 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
25654 +                                      u32 *sh_desc);
25655 +
25656 +/**
25657 + * caam_qi_enqueue - Submit a request to QI backend driver.
25658 + *
25659 + * The request structure must be properly filled as described above.
25660 + *
25661 + * @qidev: device pointer for QI backend
25662 + * @req: CAAM QI request structure
25663 + *
25664 + * Returns 0 on success or negative error code on failure.
25665 + */
25666 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
25667 +
25668 +/**
25669 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
25670 + *                    or too many CAAM responses are pending to be processed.
25671 + * @drv_ctx: driver context for which job is to be submitted
25672 + *
25673 + * Returns the CAAM congestion status: true if congested, false otherwise.
25674 + */
25675 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
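
Together with caam_congested, this is meant as a submission gate. A typical frontend check might look like this sketch (the -EAGAIN policy is an assumption, not mandated by the API):

static int my_try_enqueue(struct device *qidev, struct caam_drv_ctx *drv_ctx,
			  struct caam_drv_req *drv_req)
{
	if (unlikely(caam_congested || caam_drv_ctx_busy(drv_ctx)))
		return -EAGAIN;	/* CAAM or the response FQs are backlogged */

	return caam_qi_enqueue(qidev, drv_req);
}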
25676 +
25677 +/**
25678 + * caam_drv_ctx_update - Update QI driver context
25679 + *
25680 + * Invoked when the shared descriptor in the driver context needs to be changed.
25681 + *
25682 + * @drv_ctx: driver context to be updated
25683 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
25684 + *
25685 + * Returns 0 on success or negative error code on failure.
25686 + */
25687 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
25688 +
25689 +/**
25690 + * caam_drv_ctx_rel - Release a QI driver context
25691 + * @drv_ctx: context to be released
25692 + */
25693 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
25694 +
25695 +int caam_qi_init(struct platform_device *pdev);
25696 +int caam_qi_shutdown(struct device *dev);
25697 +
25698 +/**
25699 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
25700 + *
25701 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
25702 + * to be allocated on the hotpath. Instead of using malloc, one can use the
25703 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
25704 + * will have a size of CAAM_QI_MEMCACHE_SIZE (768B), enough for 48 SG entries.
25705 + *
25706 + * @flags: flags that would be used for the equivalent malloc(..) call
25707 + *
25708 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
25709 + */
25710 +void *qi_cache_alloc(gfp_t flags);
25711 +
25712 +/**
25713 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
25714 + *
25715 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
25716 + * the buffer previously allocated by a qi_cache_alloc call.
25717 + * No checking is done; the call is a pass-through to
25718 + * kmem_cache_free(...)
25719 + *
25720 + * @obj: object previously allocated using qi_cache_alloc()
25721 + */
25722 +void qi_cache_free(void *obj);
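
A sketch of the intended hotpath pairing (my_edesc is an illustrative per-request structure; it must fit within CAAM_QI_MEMCACHE_SIZE):

static int my_prepare(void)
{
	struct my_edesc *edesc;	/* illustrative per-request struct */

	edesc = qi_cache_alloc(GFP_ATOMIC);	/* GFP_DMA-backed cache */
	if (unlikely(!edesc))
		return -ENOMEM;

	/* ... build the S/G table and request state inside edesc ... */

	qi_cache_free(edesc);	/* on completion or on error unwind */
	return 0;
}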
25723 +
25724 +#endif /* __QI_H__ */
25725 --- a/drivers/crypto/caam/regs.h
25726 +++ b/drivers/crypto/caam/regs.h
25727 @@ -2,6 +2,7 @@
25728   * CAAM hardware register-level view
25729   *
25730   * Copyright 2008-2011 Freescale Semiconductor, Inc.
25731 + * Copyright 2017 NXP
25732   */
25733  
25734  #ifndef REGS_H
25735 @@ -67,6 +68,7 @@
25736   */
25737  
25738  extern bool caam_little_end;
25739 +extern bool caam_imx;
25740  
25741  #define caam_to_cpu(len)                               \
25742  static inline u##len caam##len ## _to_cpu(u##len val)  \
25743 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
25744  #else /* CONFIG_64BIT */
25745  static inline void wr_reg64(void __iomem *reg, u64 data)
25746  {
25747 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25748 -       if (caam_little_end) {
25749 +       if (!caam_imx && caam_little_end) {
25750                 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
25751                 wr_reg32((u32 __iomem *)(reg), data);
25752 -       } else
25753 -#endif
25754 -       {
25755 +       } else {
25756                 wr_reg32((u32 __iomem *)(reg), data >> 32);
25757                 wr_reg32((u32 __iomem *)(reg) + 1, data);
25758         }
25759 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
25760  
25761  static inline u64 rd_reg64(void __iomem *reg)
25762  {
25763 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25764 -       if (caam_little_end)
25765 +       if (!caam_imx && caam_little_end)
25766                 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
25767                         (u64)rd_reg32((u32 __iomem *)(reg)));
25768 -       else
25769 -#endif
25770 -               return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25771 -                       (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25772 +
25773 +       return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25774 +               (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25775  }
25776  #endif /* CONFIG_64BIT  */
25777  
25778 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
25779 +{
25780 +       if (caam_imx)
25781 +               return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
25782 +                        (u64)cpu_to_caam32(upper_32_bits(value)));
25783 +
25784 +       return cpu_to_caam64(value);
25785 +}
25786 +
25787 +static inline u64 caam_dma64_to_cpu(u64 value)
25788 +{
25789 +       if (caam_imx)
25790 +               return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
25791 +                        (u64)caam32_to_cpu(upper_32_bits(value)));
25792 +
25793 +       return caam64_to_cpu(value);
25794 +}
25795 +
25796  #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
25797 -#ifdef CONFIG_SOC_IMX7D
25798 -#define cpu_to_caam_dma(value) \
25799 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25800 -                 (u64)cpu_to_caam32(upper_32_bits(value)))
25801 -#define caam_dma_to_cpu(value) \
25802 -               (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
25803 -                 (u64)caam32_to_cpu(upper_32_bits(value)))
25804 -#else
25805 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
25806 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
25807 -#endif /* CONFIG_SOC_IMX7D */
25808 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
25809 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
25810  #else
25811  #define cpu_to_caam_dma(value) cpu_to_caam32(value)
25812  #define caam_dma_to_cpu(value) caam32_to_cpu(value)
25813 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT  */
25814 -
25815 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25816 -#define cpu_to_caam_dma64(value) \
25817 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25818 -                (u64)cpu_to_caam32(upper_32_bits(value)))
25819 -#else
25820 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
25821 -#endif
25822 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25823  
25824  /*
25825   * jr_outentry
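
For illustration, a worked example of the i.MX conversion added above, under the assumption that cpu_to_caam32() is a no-op here (i.e. core and CAAM agree on 32-bit endianness), so that only the 32-bit halves are swapped:

/*
 * value                    = 0x0000000122334455
 * lower_32_bits(value)     = 0x22334455  -> becomes the upper half
 * upper_32_bits(value)     = 0x00000001  -> becomes the lower half
 * cpu_to_caam_dma64(value) = 0x2233445500000001
 * caam_dma64_to_cpu() applies the same swap, undoing the conversion.
 */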
25826 @@ -293,6 +291,7 @@ struct caam_perfmon {
25827         u32 cha_rev_ls;         /* CRNR - CHA Rev No. Least significant half*/
25828  #define CTPR_MS_QI_SHIFT       25
25829  #define CTPR_MS_QI_MASK                (0x1ull << CTPR_MS_QI_SHIFT)
25830 +#define CTPR_MS_DPAA2          BIT(13)
25831  #define CTPR_MS_VIRT_EN_INCL   0x00000001
25832  #define CTPR_MS_VIRT_EN_POR    0x00000002
25833  #define CTPR_MS_PG_SZ_MASK     0x10
25834 @@ -628,6 +627,8 @@ struct caam_job_ring {
25835  #define JRSTA_DECOERR_INVSIGN       0x86
25836  #define JRSTA_DECOERR_DSASIGN       0x87
25837  
25838 +#define JRSTA_QIERR_ERROR_MASK      0x00ff
25839 +
25840  #define JRSTA_CCBERR_JUMP           0x08000000
25841  #define JRSTA_CCBERR_INDEX_MASK     0xff00
25842  #define JRSTA_CCBERR_INDEX_SHIFT    8
25843 --- /dev/null
25844 +++ b/drivers/crypto/caam/sg_sw_qm.h
25845 @@ -0,0 +1,126 @@
25846 +/*
25847 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25848 + * Copyright 2016-2017 NXP
25849 + *
25850 + * Redistribution and use in source and binary forms, with or without
25851 + * modification, are permitted provided that the following conditions are met:
25852 + *     * Redistributions of source code must retain the above copyright
25853 + *       notice, this list of conditions and the following disclaimer.
25854 + *     * Redistributions in binary form must reproduce the above copyright
25855 + *       notice, this list of conditions and the following disclaimer in the
25856 + *       documentation and/or other materials provided with the distribution.
25857 + *     * Neither the name of Freescale Semiconductor nor the
25858 + *       names of its contributors may be used to endorse or promote products
25859 + *       derived from this software without specific prior written permission.
25860 + *
25861 + *
25862 + * ALTERNATIVELY, this software may be distributed under the terms of the
25863 + * GNU General Public License ("GPL") as published by the Free Software
25864 + * Foundation, either version 2 of that License or (at your option) any
25865 + * later version.
25866 + *
25867 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
25868 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25869 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25870 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25871 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25872 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25873 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25874 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25875 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25876 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25877 + */
25878 +
25879 +#ifndef __SG_SW_QM_H
25880 +#define __SG_SW_QM_H
25881 +
25882 +#include <linux/fsl_qman.h>
25883 +#include "regs.h"
25884 +
25885 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
25886 +{
25887 +       dma_addr_t addr = qm_sg_ptr->opaque;
25888 +
25889 +       qm_sg_ptr->opaque = cpu_to_caam64(addr);
25890 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25891 +}
25892 +
25893 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
25894 +                                 u32 len, u16 offset)
25895 +{
25896 +       qm_sg_ptr->addr = dma;
25897 +       qm_sg_ptr->length = len;
25898 +       qm_sg_ptr->__reserved2 = 0;
25899 +       qm_sg_ptr->bpid = 0;
25900 +       qm_sg_ptr->__reserved3 = 0;
25901 +       qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
25902 +
25903 +       cpu_to_hw_sg(qm_sg_ptr);
25904 +}
25905 +
25906 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
25907 +                                   dma_addr_t dma, u32 len, u16 offset)
25908 +{
25909 +       qm_sg_ptr->extension = 0;
25910 +       qm_sg_ptr->final = 0;
25911 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25912 +}
25913 +
25914 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
25915 +                                        dma_addr_t dma, u32 len, u16 offset)
25916 +{
25917 +       qm_sg_ptr->extension = 0;
25918 +       qm_sg_ptr->final = 1;
25919 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25920 +}
25921 +
25922 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
25923 +                                       dma_addr_t dma, u32 len, u16 offset)
25924 +{
25925 +       qm_sg_ptr->extension = 1;
25926 +       qm_sg_ptr->final = 0;
25927 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25928 +}
25929 +
25930 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
25931 +                                            dma_addr_t dma, u32 len,
25932 +                                            u16 offset)
25933 +{
25934 +       qm_sg_ptr->extension = 1;
25935 +       qm_sg_ptr->final = 1;
25936 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25937 +}
25938 +
25939 +/*
25940 + * convert scatterlist to h/w link table format
25941 + * but does not set the final bit; instead, returns the last entry
25942 + */
25943 +static inline struct qm_sg_entry *
25944 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
25945 +           struct qm_sg_entry *qm_sg_ptr, u16 offset)
25946 +{
25947 +       while (sg_count && sg) {
25948 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
25949 +                                sg_dma_len(sg), offset);
25950 +               qm_sg_ptr++;
25951 +               sg = sg_next(sg);
25952 +               sg_count--;
25953 +       }
25954 +       return qm_sg_ptr - 1;
25955 +}
25956 +
25957 +/*
25958 + * convert scatterlist to h/w link table format
25959 + * scatterlist must have been previously dma mapped
25960 + */
25961 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
25962 +                                   struct qm_sg_entry *qm_sg_ptr, u16 offset)
25963 +{
25964 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
25965 +
25966 +       qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
25967 +       qm_sg_ptr->final = 1;
25968 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25969 +}
25970 +
25971 +#endif /* __SG_SW_QM_H */
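
A sketch of how these helpers are meant to be used (mapping direction and names are illustrative; as the comment above notes, the scatterlist must be DMA-mapped first):

static int my_fill_sgt(struct device *dev, struct scatterlist *src,
		       int src_nents, struct qm_sg_entry *sgt)
{
	int mapped = dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/* convert the mapped entries; the last one gets the final bit */
	sg_to_qm_sg_last(src, mapped, sgt, 0);
	return mapped;
}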
25972 --- /dev/null
25973 +++ b/drivers/crypto/caam/sg_sw_qm2.h
25974 @@ -0,0 +1,81 @@
25975 +/*
25976 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
25977 + * Copyright 2017 NXP
25978 + *
25979 + * Redistribution and use in source and binary forms, with or without
25980 + * modification, are permitted provided that the following conditions are met:
25981 + *     * Redistributions of source code must retain the above copyright
25982 + *      notice, this list of conditions and the following disclaimer.
25983 + *     * Redistributions in binary form must reproduce the above copyright
25984 + *      notice, this list of conditions and the following disclaimer in the
25985 + *      documentation and/or other materials provided with the distribution.
25986 + *     * Neither the names of the above-listed copyright holders nor the
25987 + *      names of any contributors may be used to endorse or promote products
25988 + *      derived from this software without specific prior written permission.
25989 + *
25990 + *
25991 + * ALTERNATIVELY, this software may be distributed under the terms of the
25992 + * GNU General Public License ("GPL") as published by the Free Software
25993 + * Foundation, either version 2 of that License or (at your option) any
25994 + * later version.
25995 + *
25996 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25997 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25998 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25999 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26000 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26001 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26002 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26003 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26004 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26005 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26006 + * POSSIBILITY OF SUCH DAMAGE.
26007 + */
26008 +
26009 +#ifndef _SG_SW_QM2_H_
26010 +#define _SG_SW_QM2_H_
26011 +
26012 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26013 +
26014 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
26015 +                                   dma_addr_t dma, u32 len, u16 offset)
26016 +{
26017 +       dpaa2_sg_set_addr(qm_sg_ptr, dma);
26018 +       dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
26019 +       dpaa2_sg_set_final(qm_sg_ptr, false);
26020 +       dpaa2_sg_set_len(qm_sg_ptr, len);
26021 +       dpaa2_sg_set_bpid(qm_sg_ptr, 0);
26022 +       dpaa2_sg_set_offset(qm_sg_ptr, offset);
26023 +}
26024 +
26025 +/*
26026 + * convert scatterlist to h/w link table format
26027 + * but does not set the final bit; instead, returns the last entry
26028 + */
26029 +static inline struct dpaa2_sg_entry *
26030 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26031 +           struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
26032 +{
26033 +       while (sg_count && sg) {
26034 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26035 +                                sg_dma_len(sg), offset);
26036 +               qm_sg_ptr++;
26037 +               sg = sg_next(sg);
26038 +               sg_count--;
26039 +       }
26040 +       return qm_sg_ptr - 1;
26041 +}
26042 +
26043 +/*
26044 + * convert scatterlist to h/w link table format
26045 + * scatterlist must have been previously dma mapped
26046 + */
26047 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26048 +                                   struct dpaa2_sg_entry *qm_sg_ptr,
26049 +                                   u16 offset)
26050 +{
26051 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26052 +       dpaa2_sg_set_final(qm_sg_ptr, true);
26053 +}
26054 +
26055 +#endif /* _SG_SW_QM2_H_ */
26056 --- a/drivers/crypto/caam/sg_sw_sec4.h
26057 +++ b/drivers/crypto/caam/sg_sw_sec4.h
26058 @@ -5,9 +5,19 @@
26059   *
26060   */
26061  
26062 +#ifndef _SG_SW_SEC4_H_
26063 +#define _SG_SW_SEC4_H_
26064 +
26065 +#include "ctrl.h"
26066  #include "regs.h"
26067 +#include "sg_sw_qm2.h"
26068 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26069  
26070 -struct sec4_sg_entry;
26071 +struct sec4_sg_entry {
26072 +       u64 ptr;
26073 +       u32 len;
26074 +       u32 bpid_offset;
26075 +};
26076  
26077  /*
26078   * convert single dma address to h/w link table format
26079 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
26080  static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
26081                                       dma_addr_t dma, u32 len, u16 offset)
26082  {
26083 -       sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26084 -       sec4_sg_ptr->len = cpu_to_caam32(len);
26085 -       sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
26086 +       if (caam_dpaa2) {
26087 +               dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
26088 +                                offset);
26089 +       } else {
26090 +               sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26091 +               sec4_sg_ptr->len = cpu_to_caam32(len);
26092 +               sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
26093 +                                                        SEC4_SG_OFFSET_MASK);
26094 +       }
26095  #ifdef DEBUG
26096         print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
26097                        DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
26098 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
26099         return sec4_sg_ptr - 1;
26100  }
26101  
26102 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
26103 +{
26104 +       if (caam_dpaa2)
26105 +               dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
26106 +       else
26107 +               sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26108 +}
26109 +
26110  /*
26111   * convert scatterlist to h/w link table format
26112   * scatterlist must have been previously dma mapped
26113 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
26114                                       u16 offset)
26115  {
26116         sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
26117 -       sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26118 -}
26119 -
26120 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
26121 -       struct scatterlist *sg, unsigned int total,
26122 -       struct sec4_sg_entry *sec4_sg_ptr)
26123 -{
26124 -       do {
26125 -               unsigned int len = min(sg_dma_len(sg), total);
26126 -
26127 -               dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
26128 -               sec4_sg_ptr++;
26129 -               sg = sg_next(sg);
26130 -               total -= len;
26131 -       } while (total);
26132 -       return sec4_sg_ptr - 1;
26133 +       sg_to_sec4_set_last(sec4_sg_ptr);
26134  }
26135  
26136 -/* derive number of elements in scatterlist, but return 0 for 1 */
26137 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
26138 -{
26139 -       int sg_nents = sg_nents_for_len(sg_list, nbytes);
26140 -
26141 -       if (likely(sg_nents == 1))
26142 -               return 0;
26143 -
26144 -       return sg_nents;
26145 -}
26146 +#endif /* _SG_SW_SEC4_H_ */
26147 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
26148 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
26149 @@ -516,7 +516,7 @@ err:
26150  
26151  /**
26152   * rsi_disconnect() - This function performs the reverse of the probe function,
26153 - *                   it deintialize the driver structure.
26154 + *                   it deinitializes the driver structure.
26155   * @pfunction: Pointer to the USB interface structure.
26156   *
26157   * Return: None.
26158 --- a/drivers/staging/wilc1000/linux_wlan.c
26159 +++ b/drivers/staging/wilc1000/linux_wlan.c
26160 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
26161         vif = netdev_priv(dev);
26162         wilc = vif->wilc;
26163  
26164 -       /* Deintialize IRQ */
26165 +       /* Deinitialize IRQ */
26166         if (wilc->dev_irq_num) {
26167                 free_irq(wilc->dev_irq_num, wilc);
26168                 gpio_free(wilc->gpio);
26169 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26170 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26171 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
26172                 del_timer_sync(&wilc_during_ip_timer);
26173  
26174         if (s32Error)
26175 -               netdev_err(net, "Error while deintializing host interface\n");
26176 +               netdev_err(net, "Error while deinitializing host interface\n");
26177  
26178         return s32Error;
26179  }
26180 --- /dev/null
26181 +++ b/include/crypto/acompress.h
26182 @@ -0,0 +1,269 @@
26183 +/*
26184 + * Asynchronous Compression operations
26185 + *
26186 + * Copyright (c) 2016, Intel Corporation
26187 + * Authors: Weigang Li <weigang.li@intel.com>
26188 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26189 + *
26190 + * This program is free software; you can redistribute it and/or modify it
26191 + * under the terms of the GNU General Public License as published by the Free
26192 + * Software Foundation; either version 2 of the License, or (at your option)
26193 + * any later version.
26194 + *
26195 + */
26196 +#ifndef _CRYPTO_ACOMP_H
26197 +#define _CRYPTO_ACOMP_H
26198 +#include <linux/crypto.h>
26199 +
26200 +#define CRYPTO_ACOMP_ALLOC_OUTPUT      0x00000001
26201 +
26202 +/**
26203 + * struct acomp_req - asynchronous (de)compression request
26204 + *
26205 + * @base:      Common attributes for asynchronous crypto requests
26206 + * @src:       Source Data
26207 + * @dst:       Destination data
26208 + * @slen:      Size of the input buffer
26209 + * @dlen:      Size of the output buffer and number of bytes produced
26210 + * @flags:     Internal flags
26211 + * @__ctx:     Start of private context data
26212 + */
26213 +struct acomp_req {
26214 +       struct crypto_async_request base;
26215 +       struct scatterlist *src;
26216 +       struct scatterlist *dst;
26217 +       unsigned int slen;
26218 +       unsigned int dlen;
26219 +       u32 flags;
26220 +       void *__ctx[] CRYPTO_MINALIGN_ATTR;
26221 +};
26222 +
26223 +/**
26224 + * struct crypto_acomp - user-instantiated objects which encapsulate
26225 + * algorithms and core processing logic
26226 + *
26227 + * @compress:          Function performs a compress operation
26228 + * @decompress:                Function performs a de-compress operation
26229 + * @dst_free:          Frees destination buffer if allocated inside the
26230 + *                     algorithm
26231 + * @reqsize:           Context size for (de)compression requests
26232 + * @base:              Common crypto API algorithm data structure
26233 + */
26234 +struct crypto_acomp {
26235 +       int (*compress)(struct acomp_req *req);
26236 +       int (*decompress)(struct acomp_req *req);
26237 +       void (*dst_free)(struct scatterlist *dst);
26238 +       unsigned int reqsize;
26239 +       struct crypto_tfm base;
26240 +};
26241 +
26242 +/**
26243 + * struct acomp_alg - asynchronous compression algorithm
26244 + *
26245 + * @compress:  Function performs a compress operation
26246 + * @decompress:        Function performs a de-compress operation
26247 + * @dst_free:  Frees destination buffer if allocated inside the algorithm
26248 + * @init:      Initialize the cryptographic transformation object.
26249 + *             This function is used to initialize the cryptographic
26250 + *             transformation object. This function is called only once at
26251 + *             the instantiation time, right after the transformation context
26252 + *             was allocated. In case the cryptographic hardware has some
26253 + *             special requirements which need to be handled by software, this
26254 + *             function shall check for the precise requirement of the
26255 + *             transformation and put any software fallbacks in place.
26256 + * @exit:      Deinitialize the cryptographic transformation object. This is a
26257 + *             counterpart to @init, used to remove various changes set in
26258 + *             @init.
26259 + *
26260 + * @reqsize:   Context size for (de)compression requests
26261 + * @base:      Common crypto API algorithm data structure
26262 + */
26263 +struct acomp_alg {
26264 +       int (*compress)(struct acomp_req *req);
26265 +       int (*decompress)(struct acomp_req *req);
26266 +       void (*dst_free)(struct scatterlist *dst);
26267 +       int (*init)(struct crypto_acomp *tfm);
26268 +       void (*exit)(struct crypto_acomp *tfm);
26269 +       unsigned int reqsize;
26270 +       struct crypto_alg base;
26271 +};
26272 +
26273 +/**
26274 + * DOC: Asynchronous Compression API
26275 + *
26276 + * The Asynchronous Compression API is used with the algorithms of type
26277 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
26278 + */
26279 +
26280 +/**
26281 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
26282 + * @alg_name:  is the cra_name / name or cra_driver_name / driver name of the
26283 + *             compression algorithm e.g. "deflate"
26284 + * @type:      specifies the type of the algorithm
26285 + * @mask:      specifies the mask for the algorithm
26286 + *
26287 + * Allocate a handle for a compression algorithm. The returned struct
26288 + * crypto_acomp is the handle that is required for any subsequent
26289 + * API invocation for the compression operations.
26290 + *
26291 + * Return:     allocated handle in case of success; IS_ERR() is true in case
26292 + *             of an error, PTR_ERR() returns the error code.
26293 + */
26294 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
26295 +                                       u32 mask);
26296 +
26297 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
26298 +{
26299 +       return &tfm->base;
26300 +}
26301 +
26302 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
26303 +{
26304 +       return container_of(alg, struct acomp_alg, base);
26305 +}
26306 +
26307 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
26308 +{
26309 +       return container_of(tfm, struct crypto_acomp, base);
26310 +}
26311 +
26312 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
26313 +{
26314 +       return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
26315 +}
26316 +
26317 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
26318 +{
26319 +       return tfm->reqsize;
26320 +}
26321 +
26322 +static inline void acomp_request_set_tfm(struct acomp_req *req,
26323 +                                        struct crypto_acomp *tfm)
26324 +{
26325 +       req->base.tfm = crypto_acomp_tfm(tfm);
26326 +}
26327 +
26328 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
26329 +{
26330 +       return __crypto_acomp_tfm(req->base.tfm);
26331 +}
26332 +
26333 +/**
26334 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
26335 + *
26336 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26337 + */
26338 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
26339 +{
26340 +       crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
26341 +}
26342 +
26343 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
26344 +{
26345 +       type &= ~CRYPTO_ALG_TYPE_MASK;
26346 +       type |= CRYPTO_ALG_TYPE_ACOMPRESS;
26347 +       mask |= CRYPTO_ALG_TYPE_MASK;
26348 +
26349 +       return crypto_has_alg(alg_name, type, mask);
26350 +}
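
Worked through with the common call crypto_has_acomp(name, 0, 0), the masking above yields type = (0 & ~0xf) | 0xa = CRYPTO_ALG_TYPE_ACOMPRESS and mask = 0 | 0xf = CRYPTO_ALG_TYPE_MASK, so crypto_has_alg() reports only algorithms whose type nibble is exactly the "acomp" value.
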
26351 +
26352 +/**
26353 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
26354 + *
26355 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26356 + *
26357 + * Return:     allocated handle in case of success or NULL in case of an error
26358 + */
26359 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
26360 +
26361 +/**
26362 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
26363 + *                        request as well as the output buffer if allocated
26364 + *                        inside the algorithm
26365 + *
26366 + * @req:       request to free
26367 + */
26368 +void acomp_request_free(struct acomp_req *req);
26369 +
26370 +/**
26371 + * acomp_request_set_callback() -- Sets an asynchronous callback
26372 + *
26373 + * Callback will be called when an asynchronous operation on a given
26374 + * request is finished.
26375 + *
26376 + * @req:       request that the callback will be set for
26377 + * @flgs:      specify for instance if the operation may backlog
26378 + * @cmpl:      callback which will be called
26379 + * @data:      private data used by the caller
26380 + */
26381 +static inline void acomp_request_set_callback(struct acomp_req *req,
26382 +                                             u32 flgs,
26383 +                                             crypto_completion_t cmpl,
26384 +                                             void *data)
26385 +{
26386 +       req->base.complete = cmpl;
26387 +       req->base.data = data;
26388 +       req->base.flags = flgs;
26389 +}
26390 +
26391 +/**
26392 + * acomp_request_set_params() -- Sets request parameters
26393 + *
26394 + * Sets parameters required by an acomp operation
26395 + *
26396 + * @req:       asynchronous compress request
26397 + * @src:       pointer to input buffer scatterlist
26398 + * @dst:       pointer to output buffer scatterlist. If this is NULL, the
26399 + *             acomp layer will allocate the output memory
26400 + * @slen:      size of the input buffer
26401 + * @dlen:      size of the output buffer. If dst is NULL, this can be used by
26402 + *             the user to specify the maximum amount of memory to allocate
26403 + */
26404 +static inline void acomp_request_set_params(struct acomp_req *req,
26405 +                                           struct scatterlist *src,
26406 +                                           struct scatterlist *dst,
26407 +                                           unsigned int slen,
26408 +                                           unsigned int dlen)
26409 +{
26410 +       req->src = src;
26411 +       req->dst = dst;
26412 +       req->slen = slen;
26413 +       req->dlen = dlen;
26414 +
26415 +       if (!req->dst)
26416 +               req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
26417 +}
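
A hedged sketch of the NULL-dst case described above, assuming a flat input buffer input of input_len bytes and a caller-owned request req; src_sg is a local scatterlist:

	struct scatterlist src_sg;

	sg_init_one(&src_sg, input, input_len);
	/*
	 * dst == NULL: the acomp layer allocates the output scatterlist;
	 * dlen (here PAGE_SIZE) caps that allocation, and
	 * acomp_request_free() later releases it through @dst_free.
	 */
	acomp_request_set_params(req, &src_sg, NULL, input_len, PAGE_SIZE);
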
26418 +
26419 +/**
26420 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
26421 + *
26422 + * Function invokes the asynchronous compress operation
26423 + *
26424 + * @req:       asynchronous compress request
26425 + *
26426 + * Return:     zero on success; error code in case of error
26427 + */
26428 +static inline int crypto_acomp_compress(struct acomp_req *req)
26429 +{
26430 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26431 +
26432 +       return tfm->compress(req);
26433 +}
26434 +
26435 +/**
26436 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
26437 + *
26438 + * Function invokes the asynchronous decompress operation
26439 + *
26440 + * @req:       asynchronous decompress request
26441 + *
26442 + * Return:     zero on success; error code in case of error
26443 + */
26444 +static inline int crypto_acomp_decompress(struct acomp_req *req)
26445 +{
26446 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26447 +
26448 +       return tfm->decompress(req);
26449 +}
26450 +
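
Putting the pieces together, a hedged end-to-end sketch that submits a compress request and waits for it synchronously. All names here (compress_sync, acomp_result, acomp_op_done) are invented for illustration, and <linux/completion.h> is assumed; this 4.9 tree predates the crypto_wait_req() helper, hence the hand-rolled completion:

	struct acomp_result {
		struct completion completion;
		int err;
	};

	static void acomp_op_done(struct crypto_async_request *base, int err)
	{
		struct acomp_result *res = base->data;

		res->err = err;
		complete(&res->completion);
	}

	static int compress_sync(struct crypto_acomp *acomp,
				 struct scatterlist *src, unsigned int slen,
				 struct scatterlist *dst, unsigned int dlen)
	{
		struct acomp_result res;
		struct acomp_req *req;
		int ret;

		req = acomp_request_alloc(acomp);
		if (!req)
			return -ENOMEM;

		init_completion(&res.completion);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   acomp_op_done, &res);
		acomp_request_set_params(req, src, dst, slen, dlen);

		ret = crypto_acomp_compress(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			/* with MAY_BACKLOG set, -EBUSY means the request was
			 * backlogged and the callback will still run */
			wait_for_completion(&res.completion);
			ret = res.err;
		}
		/* on success, implementations are expected to have updated
		 * req->dlen to the produced output length */
		acomp_request_free(req);
		return ret;
	}
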
26451 +#endif
26452 --- /dev/null
26453 +++ b/include/crypto/internal/acompress.h
26454 @@ -0,0 +1,81 @@
26455 +/*
26456 + * Asynchronous Compression operations
26457 + *
26458 + * Copyright (c) 2016, Intel Corporation
26459 + * Authors: Weigang Li <weigang.li@intel.com>
26460 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26461 + *
26462 + * This program is free software; you can redistribute it and/or modify it
26463 + * under the terms of the GNU General Public License as published by the Free
26464 + * Software Foundation; either version 2 of the License, or (at your option)
26465 + * any later version.
26466 + *
26467 + */
26468 +#ifndef _CRYPTO_ACOMP_INT_H
26469 +#define _CRYPTO_ACOMP_INT_H
26470 +#include <crypto/acompress.h>
26471 +
26472 +/*
26473 + * Transform internal helpers.
26474 + */
26475 +static inline void *acomp_request_ctx(struct acomp_req *req)
26476 +{
26477 +       return req->__ctx;
26478 +}
26479 +
26480 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
26481 +{
26482 +       return tfm->base.__crt_ctx;
26483 +}
26484 +
26485 +static inline void acomp_request_complete(struct acomp_req *req,
26486 +                                         int err)
26487 +{
26488 +       req->base.complete(&req->base, err);
26489 +}
26490 +
26491 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
26492 +{
26493 +       return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
26494 +}
26495 +
26496 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
26497 +{
26498 +       struct acomp_req *req;
26499 +
26500 +       req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
26501 +       if (likely(req))
26502 +               acomp_request_set_tfm(req, tfm);
26503 +       return req;
26504 +}
26505 +
26506 +static inline void __acomp_request_free(struct acomp_req *req)
26507 +{
26508 +       kzfree(req);
26509 +}
26510 +
26511 +/**
26512 + * crypto_register_acomp() -- Register asynchronous compression algorithm
26513 + *
26514 + * Function registers an implementation of an asynchronous
26515 + * compression algorithm
26516 + *
26517 + * @alg:       algorithm definition
26518 + *
26519 + * Return:     zero on success; error code in case of error
26520 + */
26521 +int crypto_register_acomp(struct acomp_alg *alg);
26522 +
26523 +/**
26524 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
26525 + *
26526 + * Function unregisters an implementation of an asynchronous
26527 + * compression algorithm
26528 + *
26529 + * @alg:       algorithm definition
26530 + *
26531 + * Return:     zero on success; error code in case of error
26532 + */
26533 +int crypto_unregister_acomp(struct acomp_alg *alg);
26534 +
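
For the provider side, a hypothetical registration skeleton (every my_* name is invented for illustration); a real driver would also set cra_ctxsize, cra_priority and the init/exit hooks, and do actual (de)compression work:

	static int my_acomp_compress(struct acomp_req *req)
	{
		/*
		 * A hardware driver would map req->src/req->dst, start the
		 * engine, and call acomp_request_complete(req, err) from
		 * its completion interrupt.
		 */
		return -EINPROGRESS;
	}

	static int my_acomp_decompress(struct acomp_req *req)
	{
		return -EINPROGRESS;	/* mirrors my_acomp_compress() */
	}

	static struct acomp_alg my_acomp = {
		.compress	= my_acomp_compress,
		.decompress	= my_acomp_decompress,
		.base		= {
			.cra_name		= "deflate",
			.cra_driver_name	= "deflate-mydev",
			.cra_priority		= 300,
			.cra_module		= THIS_MODULE,
		},
	};

	static int __init my_acomp_init(void)
	{
		return crypto_register_acomp(&my_acomp);
	}

	static void __exit my_acomp_exit(void)
	{
		crypto_unregister_acomp(&my_acomp);
	}
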
26535 +#endif
26536 --- /dev/null
26537 +++ b/include/crypto/internal/scompress.h
26538 @@ -0,0 +1,136 @@
26539 +/*
26540 + * Synchronous Compression operations
26541 + *
26542 + * Copyright 2015 LG Electronics Inc.
26543 + * Copyright (c) 2016, Intel Corporation
26544 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26545 + *
26546 + * This program is free software; you can redistribute it and/or modify it
26547 + * under the terms of the GNU General Public License as published by the Free
26548 + * Software Foundation; either version 2 of the License, or (at your option)
26549 + * any later version.
26550 + *
26551 + */
26552 +#ifndef _CRYPTO_SCOMP_INT_H
26553 +#define _CRYPTO_SCOMP_INT_H
26554 +#include <linux/crypto.h>
26555 +
26556 +#define SCOMP_SCRATCH_SIZE     131072
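
SCOMP_SCRATCH_SIZE (128 KiB) appears to size the per-CPU scratch buffers that crypto/scompress.c, added by this patch, uses to linearize scatterlists when an scomp algorithm is driven through the acomp interface.
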
26557 +
26558 +struct crypto_scomp {
26559 +       struct crypto_tfm base;
26560 +};
26561 +
26562 +/**
26563 + * struct scomp_alg - synchronous compression algorithm
26564 + *
26565 + * @alloc_ctx: Function allocates algorithm specific context
26566 + * @free_ctx:  Function frees context allocated with alloc_ctx
26567 + * @compress:  Function performs a compress operation
26568 + * @decompress:        Function performs a decompress operation
26569 + * @init:      Initialize the cryptographic transformation object.
26570 + *             This function is used to initialize the cryptographic
26571 + *             transformation object. This function is called only once at
26572 + *             the instantiation time, right after the transformation context
26573 + *             was allocated. In case the cryptographic hardware has some
26574 + *             special requirements which need to be handled by software, this
26575 + *             function shall check for the precise requirement of the
26576 + *             transformation and put any software fallbacks in place.
26577 + * @exit:      Deinitialize the cryptographic transformation object. This is a
26578 + *             counterpart to @init, used to remove various changes set in
26579 + *             @init.
26580 + * @base:      Common crypto API algorithm data structure
26581 + */
26582 +struct scomp_alg {
26583 +       void *(*alloc_ctx)(struct crypto_scomp *tfm);
26584 +       void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
26585 +       int (*compress)(struct crypto_scomp *tfm, const u8 *src,
26586 +                       unsigned int slen, u8 *dst, unsigned int *dlen,
26587 +                       void *ctx);
26588 +       int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
26589 +                         unsigned int slen, u8 *dst, unsigned int *dlen,
26590 +                         void *ctx);
26591 +       struct crypto_alg base;
26592 +};
26593 +
26594 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
26595 +{
26596 +       return container_of(alg, struct scomp_alg, base);
26597 +}
26598 +
26599 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
26600 +{
26601 +       return container_of(tfm, struct crypto_scomp, base);
26602 +}
26603 +
26604 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
26605 +{
26606 +       return &tfm->base;
26607 +}
26608 +
26609 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
26610 +{
26611 +       crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
26612 +}
26613 +
26614 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
26615 +{
26616 +       return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
26617 +}
26618 +
26619 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
26620 +{
26621 +       return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
26622 +}
26623 +
26624 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
26625 +                                        void *ctx)
26626 +{
26627 +       return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
26628 +}
26629 +
26630 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
26631 +                                       const u8 *src, unsigned int slen,
26632 +                                       u8 *dst, unsigned int *dlen, void *ctx)
26633 +{
26634 +       return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
26635 +}
26636 +
26637 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
26638 +                                         const u8 *src, unsigned int slen,
26639 +                                         u8 *dst, unsigned int *dlen,
26640 +                                         void *ctx)
26641 +{
26642 +       return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
26643 +                                                ctx);
26644 +}
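
A hedged consumer-side sketch of the ctx-based flow above, assuming tfm, src/slen and dst/dlen come from the caller; dlen must enter holding the capacity of dst, which the callee updates to the produced length:

	void *ctx;
	int ret;

	ctx = crypto_scomp_alloc_ctx(tfm);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = crypto_scomp_compress(tfm, src, slen, dst, &dlen, ctx);
	crypto_scomp_free_ctx(tfm, ctx);
	return ret;
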
26645 +
26646 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
26647 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
26648 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
26649 +
26650 +/**
26651 + * crypto_register_scomp() -- Register synchronous compression algorithm
26652 + *
26653 + * Function registers an implementation of a synchronous
26654 + * compression algorithm
26655 + *
26656 + * @alg:       algorithm definition
26657 + *
26658 + * Return: zero on success; error code in case of error
26659 + */
26660 +int crypto_register_scomp(struct scomp_alg *alg);
26661 +
26662 +/**
26663 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
26664 + *
26665 + * Function unregisters an implementation of a synchronous
26666 + * compression algorithm
26667 + *
26668 + * @alg:       algorithm definition
26669 + *
26670 + * Return: zero on success; error code in case of error
26671 + */
26672 +int crypto_unregister_scomp(struct scomp_alg *alg);
26673 +
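
A hypothetical scomp provider skeleton to match (all my_* names and the context size are illustrative; the placeholder bodies stand in for real codec logic that would fill dst and update *dlen):

	static void *my_scomp_alloc_ctx(struct crypto_scomp *tfm)
	{
		void *ctx = kmalloc(4096, GFP_KERNEL);	/* size illustrative */

		return ctx ? ctx : ERR_PTR(-ENOMEM);
	}

	static void my_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
	{
		kfree(ctx);
	}

	static int my_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
				     unsigned int slen, u8 *dst,
				     unsigned int *dlen, void *ctx)
	{
		return -EOPNOTSUPP;	/* placeholder */
	}

	static int my_scomp_decompress(struct crypto_scomp *tfm, const u8 *src,
				       unsigned int slen, u8 *dst,
				       unsigned int *dlen, void *ctx)
	{
		return -EOPNOTSUPP;	/* placeholder */
	}

	static struct scomp_alg my_scomp = {
		.alloc_ctx	= my_scomp_alloc_ctx,
		.free_ctx	= my_scomp_free_ctx,
		.compress	= my_scomp_compress,
		.decompress	= my_scomp_decompress,
		.base		= {
			.cra_name	= "deflate",
			.cra_driver_name = "deflate-myscomp",
			.cra_module	= THIS_MODULE,
		},
	};

The algorithm would then be registered with crypto_register_scomp(&my_scomp) and torn down with crypto_unregister_scomp(&my_scomp).
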
26674 +#endif
26675 --- a/include/linux/crypto.h
26676 +++ b/include/linux/crypto.h
26677 @@ -50,6 +50,8 @@
26678  #define CRYPTO_ALG_TYPE_SKCIPHER       0x00000005
26679  #define CRYPTO_ALG_TYPE_GIVCIPHER      0x00000006
26680  #define CRYPTO_ALG_TYPE_KPP            0x00000008
26681 +#define CRYPTO_ALG_TYPE_ACOMPRESS      0x0000000a
26682 +#define CRYPTO_ALG_TYPE_SCOMPRESS      0x0000000b
26683  #define CRYPTO_ALG_TYPE_RNG            0x0000000c
26684  #define CRYPTO_ALG_TYPE_AKCIPHER       0x0000000d
26685  #define CRYPTO_ALG_TYPE_DIGEST         0x0000000e
26686 @@ -60,6 +62,7 @@
26687  #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
26688  #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000e
26689  #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
26690 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
26691  
26692  #define CRYPTO_ALG_LARVAL              0x00000010
26693  #define CRYPTO_ALG_DEAD                        0x00000020
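
A note on the values added above: CRYPTO_ALG_TYPE_ACOMPRESS_MASK is 0xe rather than 0xf, so both new type codes collapse to the same key under it (0xa & 0xe == 0xa and 0xb & 0xe == 0xa); this appears to be what lets a synchronous scomp implementation (type 0xb) satisfy an acomp (type 0xa) allocation through the wrapper in crypto/scompress.c.
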
26694 --- a/include/uapi/linux/cryptouser.h
26695 +++ b/include/uapi/linux/cryptouser.h
26696 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
26697         CRYPTOCFGA_REPORT_CIPHER,       /* struct crypto_report_cipher */
26698         CRYPTOCFGA_REPORT_AKCIPHER,     /* struct crypto_report_akcipher */
26699         CRYPTOCFGA_REPORT_KPP,          /* struct crypto_report_kpp */
26700 +       CRYPTOCFGA_REPORT_ACOMP,        /* struct crypto_report_acomp */
26701         __CRYPTOCFGA_MAX
26702  
26703  #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
26704 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
26705         char type[CRYPTO_MAX_NAME];
26706  };
26707  
26708 +struct crypto_report_acomp {
26709 +       char type[CRYPTO_MAX_NAME];
26710 +};
26711 +
26712  #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
26713                                sizeof(struct crypto_report_blkcipher))
26714 --- a/scripts/spelling.txt
26715 +++ b/scripts/spelling.txt
26716 @@ -305,6 +305,9 @@ defintion||definition
26717  defintions||definitions
26718  defualt||default
26719  defult||default
26720 +deintializing||deinitializing
26721 +deintialize||deinitialize
26722 +deintialized||deinitialized
26723  deivce||device
26724  delared||declared
26725  delare||declare
26726 --- a/sound/soc/amd/acp-pcm-dma.c
26727 +++ b/sound/soc/amd/acp-pcm-dma.c
26728 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
26729         return 0;
26730  }
26731  
26732 -/* Deintialize ACP */
26733 +/* Deinitialize ACP */
26734  static int acp_deinit(void __iomem *acp_mmio)
26735  {
26736         u32 val;