kernel: bump 4.9 to 4.9.82
target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
1 From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:29:23 +0800
4 Subject: [PATCH 16/30] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for layerscape sec support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32  crypto/Kconfig                                    |   30 +
33  crypto/Makefile                                   |    4 +
34  crypto/acompress.c                                |  169 +
35  crypto/algboss.c                                  |   12 +-
36  crypto/crypto_user.c                              |   19 +
37  crypto/scompress.c                                |  356 ++
38  crypto/tcrypt.c                                   |   17 +-
39  crypto/testmgr.c                                  | 1708 +++---
40  crypto/testmgr.h                                  | 1125 ++--
41  crypto/tls.c                                      |  607 +++
42  drivers/crypto/caam/Kconfig                       |   77 +-
43  drivers/crypto/caam/Makefile                      |   16 +-
44  drivers/crypto/caam/caamalg.c                     | 2171 ++------
45  drivers/crypto/caam/caamalg_desc.c                | 1961 +++++++
46  drivers/crypto/caam/caamalg_desc.h                |  127 +
47  drivers/crypto/caam/caamalg_qi.c                  | 2929 ++++++++++
48  drivers/crypto/caam/caamalg_qi2.c                 | 5920 +++++++++++++++++++++
49  drivers/crypto/caam/caamalg_qi2.h                 |  281 +
50  drivers/crypto/caam/caamhash.c                    |  550 +-
51  drivers/crypto/caam/caamhash_desc.c               |  108 +
52  drivers/crypto/caam/caamhash_desc.h               |   49 +
53  drivers/crypto/caam/caampkc.c                     |  471 +-
54  drivers/crypto/caam/caampkc.h                     |   58 +
55  drivers/crypto/caam/caamrng.c                     |   16 +-
56  drivers/crypto/caam/compat.h                      |    1 +
57  drivers/crypto/caam/ctrl.c                        |  358 +-
58  drivers/crypto/caam/ctrl.h                        |    2 +
59  drivers/crypto/caam/desc.h                        |   84 +-
60  drivers/crypto/caam/desc_constr.h                 |  180 +-
61  drivers/crypto/caam/dpseci.c                      |  859 +++
62  drivers/crypto/caam/dpseci.h                      |  395 ++
63  drivers/crypto/caam/dpseci_cmd.h                  |  261 +
64  drivers/crypto/caam/error.c                       |  127 +-
65  drivers/crypto/caam/error.h                       |   10 +-
66  drivers/crypto/caam/intern.h                      |   31 +-
67  drivers/crypto/caam/jr.c                          |   72 +-
68  drivers/crypto/caam/jr.h                          |    2 +
69  drivers/crypto/caam/key_gen.c                     |   32 +-
70  drivers/crypto/caam/key_gen.h                     |   36 +-
71  drivers/crypto/caam/pdb.h                         |   62 +
72  drivers/crypto/caam/pkc_desc.c                    |   36 +
73  drivers/crypto/caam/qi.c                          |  797 +++
74  drivers/crypto/caam/qi.h                          |  204 +
75  drivers/crypto/caam/regs.h                        |   63 +-
76  drivers/crypto/caam/sg_sw_qm.h                    |  126 +
77  drivers/crypto/caam/sg_sw_qm2.h                   |   81 +
78  drivers/crypto/caam/sg_sw_sec4.h                  |   60 +-
79  drivers/net/wireless/rsi/rsi_91x_usb.c            |    2 +-
80  drivers/staging/wilc1000/linux_wlan.c             |    2 +-
81  drivers/staging/wilc1000/wilc_wfi_cfgoperations.c |    2 +-
82  include/crypto/acompress.h                        |  269 +
83  include/crypto/internal/acompress.h               |   81 +
84  include/crypto/internal/scompress.h               |  136 +
85  include/linux/crypto.h                            |    3 +
86  include/uapi/linux/cryptouser.h                   |    5 +
87  scripts/spelling.txt                              |    3 +
88  sound/soc/amd/acp-pcm-dma.c                       |    2 +-
89  57 files changed, 19177 insertions(+), 3988 deletions(-)
90  create mode 100644 crypto/acompress.c
91  create mode 100644 crypto/scompress.c
92  create mode 100644 crypto/tls.c
93  create mode 100644 drivers/crypto/caam/caamalg_desc.c
94  create mode 100644 drivers/crypto/caam/caamalg_desc.h
95  create mode 100644 drivers/crypto/caam/caamalg_qi.c
96  create mode 100644 drivers/crypto/caam/caamalg_qi2.c
97  create mode 100644 drivers/crypto/caam/caamalg_qi2.h
98  create mode 100644 drivers/crypto/caam/caamhash_desc.c
99  create mode 100644 drivers/crypto/caam/caamhash_desc.h
100  create mode 100644 drivers/crypto/caam/dpseci.c
101  create mode 100644 drivers/crypto/caam/dpseci.h
102  create mode 100644 drivers/crypto/caam/dpseci_cmd.h
103  create mode 100644 drivers/crypto/caam/qi.c
104  create mode 100644 drivers/crypto/caam/qi.h
105  create mode 100644 drivers/crypto/caam/sg_sw_qm.h
106  create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
107  create mode 100644 include/crypto/acompress.h
108  create mode 100644 include/crypto/internal/acompress.h
109  create mode 100644 include/crypto/internal/scompress.h
110
111 --- a/crypto/Kconfig
112 +++ b/crypto/Kconfig
113 @@ -102,6 +102,15 @@ config CRYPTO_KPP
114         select CRYPTO_ALGAPI
115         select CRYPTO_KPP2
116  
117 +config CRYPTO_ACOMP2
118 +       tristate
119 +       select CRYPTO_ALGAPI2
120 +
121 +config CRYPTO_ACOMP
122 +       tristate
123 +       select CRYPTO_ALGAPI
124 +       select CRYPTO_ACOMP2
125 +
126  config CRYPTO_RSA
127         tristate "RSA algorithm"
128         select CRYPTO_AKCIPHER
129 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
130         select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
131         select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
132         select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
133 +       select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
134  
135  config CRYPTO_USER
136         tristate "Userspace cryptographic algorithm configuration"
137 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
138           a sequence number xored with a salt.  This is the default
139           algorithm for CBC.
140  
141 +config CRYPTO_TLS
142 +       tristate "TLS support"
143 +       select CRYPTO_AEAD
144 +       select CRYPTO_BLKCIPHER
145 +       select CRYPTO_MANAGER
146 +       select CRYPTO_HASH
147 +       select CRYPTO_NULL
148 +       select CRYPTO_AUTHENC
149 +       help
150 +         Support for TLS 1.0 record encryption and decryption
151 +
152 +         This module adds support for encryption/decryption of TLS 1.0 frames
153 +         using blockcipher algorithms. The name of the resulting algorithm is
154 +         "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
155 +         algorithms are used (e.g. aes-generic, sha1-generic), but hardware
156 +         accelerated versions will be used automatically if available.
157 +
158 +         User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
159 +         operations through AF_ALG or cryptodev interfaces.
160 +
161  comment "Block modes"
162  
163  config CRYPTO_CBC
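
The CRYPTO_TLS help text above is the whole userspace contract: the template registers as an AEAD named "tls10(hmac(<digest>),cbc(<cipher>))", so it is reachable through AF_ALG like any other AEAD. What follows is a minimal sketch of that path, not part of the patch; it assumes a kernel carrying this series with CONFIG_CRYPTO_TLS enabled, and the key layout is illustrative only.

/*
 * Sketch: bind an AF_ALG AEAD socket to the tls10 template added by this
 * patch. Assumes CONFIG_CRYPTO_TLS=y on a kernel with this series; the
 * flat key below is illustrative -- authenc()-style transforms normally
 * expect the RTA-wrapped "auth key || enc key" blob from crypto/authenc.h,
 * so a real caller would build that first.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "tls10(hmac(sha1),cbc(aes))",
	};
	unsigned char key[36] = { 0 };	/* 20-byte HMAC-SHA1 key + 16-byte AES key */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;	/* template not available on this kernel */

	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
		perror("ALG_SET_KEY");	/* expected with a flat key, see above */

	opfd = accept(tfmfd, NULL, 0);	/* per-request fd for sendmsg()/read() */
	if (opfd >= 0)
		close(opfd);
	close(tfmfd);
	return 0;
}
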
164 --- a/crypto/Makefile
165 +++ b/crypto/Makefile
166 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
167  rsa_generic-y += rsa-pkcs1pad.o
168  obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
169  
170 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
171 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
172 +
173  cryptomgr-y := algboss.o testmgr.o
174  
175  obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
176 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
177  obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
178  obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
179  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
180 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
181  obj-$(CONFIG_CRYPTO_LZO) += lzo.o
182  obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
183  obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
184 --- /dev/null
185 +++ b/crypto/acompress.c
186 @@ -0,0 +1,169 @@
187 +/*
188 + * Asynchronous Compression operations
189 + *
190 + * Copyright (c) 2016, Intel Corporation
191 + * Authors: Weigang Li <weigang.li@intel.com>
192 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
193 + *
194 + * This program is free software; you can redistribute it and/or modify it
195 + * under the terms of the GNU General Public License as published by the Free
196 + * Software Foundation; either version 2 of the License, or (at your option)
197 + * any later version.
198 + *
199 + */
200 +#include <linux/errno.h>
201 +#include <linux/kernel.h>
202 +#include <linux/module.h>
203 +#include <linux/seq_file.h>
204 +#include <linux/slab.h>
205 +#include <linux/string.h>
206 +#include <linux/crypto.h>
207 +#include <crypto/algapi.h>
208 +#include <linux/cryptouser.h>
209 +#include <net/netlink.h>
210 +#include <crypto/internal/acompress.h>
211 +#include <crypto/internal/scompress.h>
212 +#include "internal.h"
213 +
214 +static const struct crypto_type crypto_acomp_type;
215 +
216 +#ifdef CONFIG_NET
217 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
218 +{
219 +       struct crypto_report_acomp racomp;
220 +
221 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
222 +
223 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
224 +                   sizeof(struct crypto_report_acomp), &racomp))
225 +               goto nla_put_failure;
226 +       return 0;
227 +
228 +nla_put_failure:
229 +       return -EMSGSIZE;
230 +}
231 +#else
232 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
233 +{
234 +       return -ENOSYS;
235 +}
236 +#endif
237 +
238 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
239 +       __attribute__ ((unused));
240 +
241 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
242 +{
243 +       seq_puts(m, "type         : acomp\n");
244 +}
245 +
246 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
247 +{
248 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
249 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
250 +
251 +       alg->exit(acomp);
252 +}
253 +
254 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
255 +{
256 +       struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
257 +       struct acomp_alg *alg = crypto_acomp_alg(acomp);
258 +
259 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
260 +               return crypto_init_scomp_ops_async(tfm);
261 +
262 +       acomp->compress = alg->compress;
263 +       acomp->decompress = alg->decompress;
264 +       acomp->dst_free = alg->dst_free;
265 +       acomp->reqsize = alg->reqsize;
266 +
267 +       if (alg->exit)
268 +               acomp->base.exit = crypto_acomp_exit_tfm;
269 +
270 +       if (alg->init)
271 +               return alg->init(acomp);
272 +
273 +       return 0;
274 +}
275 +
276 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
277 +{
278 +       int extsize = crypto_alg_extsize(alg);
279 +
280 +       if (alg->cra_type != &crypto_acomp_type)
281 +               extsize += sizeof(struct crypto_scomp *);
282 +
283 +       return extsize;
284 +}
285 +
286 +static const struct crypto_type crypto_acomp_type = {
287 +       .extsize = crypto_acomp_extsize,
288 +       .init_tfm = crypto_acomp_init_tfm,
289 +#ifdef CONFIG_PROC_FS
290 +       .show = crypto_acomp_show,
291 +#endif
292 +       .report = crypto_acomp_report,
293 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
294 +       .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
295 +       .type = CRYPTO_ALG_TYPE_ACOMPRESS,
296 +       .tfmsize = offsetof(struct crypto_acomp, base),
297 +};
298 +
299 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
300 +                                       u32 mask)
301 +{
302 +       return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
303 +}
304 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
305 +
306 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
307 +{
308 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
309 +       struct acomp_req *req;
310 +
311 +       req = __acomp_request_alloc(acomp);
312 +       if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
313 +               return crypto_acomp_scomp_alloc_ctx(req);
314 +
315 +       return req;
316 +}
317 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
318 +
319 +void acomp_request_free(struct acomp_req *req)
320 +{
321 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
322 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
323 +
324 +       if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
325 +               crypto_acomp_scomp_free_ctx(req);
326 +
327 +       if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
328 +               acomp->dst_free(req->dst);
329 +               req->dst = NULL;
330 +       }
331 +
332 +       __acomp_request_free(req);
333 +}
334 +EXPORT_SYMBOL_GPL(acomp_request_free);
335 +
336 +int crypto_register_acomp(struct acomp_alg *alg)
337 +{
338 +       struct crypto_alg *base = &alg->base;
339 +
340 +       base->cra_type = &crypto_acomp_type;
341 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
342 +       base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
343 +
344 +       return crypto_register_alg(base);
345 +}
346 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
347 +
348 +int crypto_unregister_acomp(struct acomp_alg *alg)
349 +{
350 +       return crypto_unregister_alg(&alg->base);
351 +}
352 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
353 +
354 +MODULE_LICENSE("GPL");
355 +MODULE_DESCRIPTION("Asynchronous compression type");
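
The file above defines only the transform type; kernel-side users drive it through the request API (the testmgr.c changes later in this patch do exactly this). A minimal sketch, not part of the patch: it assumes some compressor ("deflate" here) is registered, and it relies on scomp-backed tfms completing synchronously, so no completion callback is wired up.

/*
 * Sketch: one-shot compression through the new acomp API. Illustrative
 * only -- "deflate" is an assumption, and in/out must be real (non-stack)
 * buffers because they are mapped through scatterlists.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

static int example_acomp_once(const u8 *in, unsigned int inlen,
			      u8 *out, unsigned int *outlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, *outlen);
	acomp_request_set_params(req, &src, &dst, inlen, *outlen);
	acomp_request_set_callback(req, 0, NULL, NULL);	/* sync backends only */

	ret = crypto_acomp_compress(req);
	if (!ret)
		*outlen = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
out_tfm:
	crypto_free_acomp(tfm);
	return ret;
}
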
356 --- a/crypto/algboss.c
357 +++ b/crypto/algboss.c
358 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
359         memcpy(param->alg, alg->cra_name, sizeof(param->alg));
360         type = alg->cra_flags;
361  
362 -       /* This piece of crap needs to disappear into per-type test hooks. */
363 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
364 -       type |= CRYPTO_ALG_TESTED;
365 -#else
366 -       if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
367 -             CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
368 -           ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
369 -            CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
370 -                                        alg->cra_ablkcipher.ivsize))
371 +       /* Do not test internal algorithms. */
372 +       if (type & CRYPTO_ALG_INTERNAL)
373                 type |= CRYPTO_ALG_TESTED;
374 -#endif
375  
376         param->type = type;
377  
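
The rewritten hook hinges on CRYPTO_ALG_INTERNAL: any algorithm carrying that flag is marked tested without a test ever being scheduled, because internal helpers exist only as building blocks for other transforms and must not be bound directly. Opting in happens at registration time; a hedged, declaration-only sketch with illustrative names follows (handler stubs elided).

/*
 * Sketch: an algorithm cryptomgr_schedule_test() now skips. All names
 * are illustrative; a real driver must fill in .init/.update/.final
 * before calling crypto_register_shash().
 */
#include <linux/module.h>
#include <crypto/internal/hash.h>

static struct shash_alg example_core_alg = {
	.digestsize = 32,
	/* .init, .update, .final elided in this sketch */
	.base = {
		.cra_name        = "example-core",
		.cra_driver_name = "example-core-generic",
		.cra_priority    = 300,
		/* INTERNAL: usable only via a wrapping template */
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
		.cra_blocksize   = 64,
		.cra_module      = THIS_MODULE,
	},
};
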
378 --- a/crypto/crypto_user.c
379 +++ b/crypto/crypto_user.c
380 @@ -112,6 +112,21 @@ nla_put_failure:
381         return -EMSGSIZE;
382  }
383  
384 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
385 +{
386 +       struct crypto_report_acomp racomp;
387 +
388 +       strncpy(racomp.type, "acomp", sizeof(racomp.type));
389 +
390 +       if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
391 +                   sizeof(struct crypto_report_acomp), &racomp))
392 +               goto nla_put_failure;
393 +       return 0;
394 +
395 +nla_put_failure:
396 +       return -EMSGSIZE;
397 +}
398 +
399  static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
400  {
401         struct crypto_report_akcipher rakcipher;
402 @@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp
403                         goto nla_put_failure;
404  
405                 break;
406 +       case CRYPTO_ALG_TYPE_ACOMPRESS:
407 +               if (crypto_report_acomp(skb, alg))
408 +                       goto nla_put_failure;
409  
410 +               break;
411         case CRYPTO_ALG_TYPE_AKCIPHER:
412                 if (crypto_report_akcipher(skb, alg))
413                         goto nla_put_failure;
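
This is the second of two report paths for the new type: crypto_acomp_report() in acompress.c serves the crypto_type ->report hook, which crypto_report_one() tries first, while the copy added here covers the type switch that acts as the fallback. Either way, a CRYPTO_MSG_GETALG query on a NETLINK_CRYPTO socket now gets a CRYPTOCFGA_REPORT_ACOMP attribute for acomp algorithms instead of falling through to the unknown-type default.
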
414 --- /dev/null
415 +++ b/crypto/scompress.c
416 @@ -0,0 +1,356 @@
417 +/*
418 + * Synchronous Compression operations
419 + *
420 + * Copyright 2015 LG Electronics Inc.
421 + * Copyright (c) 2016, Intel Corporation
422 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
423 + *
424 + * This program is free software; you can redistribute it and/or modify it
425 + * under the terms of the GNU General Public License as published by the Free
426 + * Software Foundation; either version 2 of the License, or (at your option)
427 + * any later version.
428 + *
429 + */
430 +#include <linux/errno.h>
431 +#include <linux/kernel.h>
432 +#include <linux/module.h>
433 +#include <linux/seq_file.h>
434 +#include <linux/slab.h>
435 +#include <linux/string.h>
436 +#include <linux/crypto.h>
437 +#include <linux/vmalloc.h>
438 +#include <crypto/algapi.h>
439 +#include <linux/cryptouser.h>
440 +#include <net/netlink.h>
441 +#include <linux/scatterlist.h>
442 +#include <crypto/scatterwalk.h>
443 +#include <crypto/internal/acompress.h>
444 +#include <crypto/internal/scompress.h>
445 +#include "internal.h"
446 +
447 +static const struct crypto_type crypto_scomp_type;
448 +static void * __percpu *scomp_src_scratches;
449 +static void * __percpu *scomp_dst_scratches;
450 +static int scomp_scratch_users;
451 +static DEFINE_MUTEX(scomp_lock);
452 +
453 +#ifdef CONFIG_NET
454 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
455 +{
456 +       struct crypto_report_comp rscomp;
457 +
458 +       strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
459 +
460 +       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
461 +                   sizeof(struct crypto_report_comp), &rscomp))
462 +               goto nla_put_failure;
463 +       return 0;
464 +
465 +nla_put_failure:
466 +       return -EMSGSIZE;
467 +}
468 +#else
469 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
470 +{
471 +       return -ENOSYS;
472 +}
473 +#endif
474 +
475 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
476 +       __attribute__ ((unused));
477 +
478 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
479 +{
480 +       seq_puts(m, "type         : scomp\n");
481 +}
482 +
483 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
484 +{
485 +       return 0;
486 +}
487 +
488 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
489 +{
490 +       int i;
491 +
492 +       if (!scratches)
493 +               return;
494 +
495 +       for_each_possible_cpu(i)
496 +               vfree(*per_cpu_ptr(scratches, i));
497 +
498 +       free_percpu(scratches);
499 +}
500 +
501 +static void * __percpu *crypto_scomp_alloc_scratches(void)
502 +{
503 +       void * __percpu *scratches;
504 +       int i;
505 +
506 +       scratches = alloc_percpu(void *);
507 +       if (!scratches)
508 +               return NULL;
509 +
510 +       for_each_possible_cpu(i) {
511 +               void *scratch;
512 +
513 +               scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
514 +               if (!scratch)
515 +                       goto error;
516 +               *per_cpu_ptr(scratches, i) = scratch;
517 +       }
518 +
519 +       return scratches;
520 +
521 +error:
522 +       crypto_scomp_free_scratches(scratches);
523 +       return NULL;
524 +}
525 +
526 +static void crypto_scomp_free_all_scratches(void)
527 +{
528 +       if (!--scomp_scratch_users) {
529 +               crypto_scomp_free_scratches(scomp_src_scratches);
530 +               crypto_scomp_free_scratches(scomp_dst_scratches);
531 +               scomp_src_scratches = NULL;
532 +               scomp_dst_scratches = NULL;
533 +       }
534 +}
535 +
536 +static int crypto_scomp_alloc_all_scratches(void)
537 +{
538 +       if (!scomp_scratch_users++) {
539 +               scomp_src_scratches = crypto_scomp_alloc_scratches();
540 +               if (!scomp_src_scratches)
541 +                       return -ENOMEM;
542 +               scomp_dst_scratches = crypto_scomp_alloc_scratches();
543 +               if (!scomp_dst_scratches)
544 +                       return -ENOMEM;
545 +       }
546 +       return 0;
547 +}
548 +
549 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
550 +{
551 +       int i, n;
552 +       struct page *page;
553 +
554 +       if (!sgl)
555 +               return;
556 +
557 +       n = sg_nents(sgl);
558 +       for_each_sg(sgl, sgl, n, i) {
559 +               page = sg_page(sgl);
560 +               if (page)
561 +                       __free_page(page);
562 +       }
563 +
564 +       kfree(sgl);
565 +}
566 +
567 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
568 +{
569 +       struct scatterlist *sgl;
570 +       struct page *page;
571 +       int i, n;
572 +
573 +       n = ((size - 1) >> PAGE_SHIFT) + 1;
574 +
575 +       sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
576 +       if (!sgl)
577 +               return NULL;
578 +
579 +       sg_init_table(sgl, n);
580 +
581 +       for (i = 0; i < n; i++) {
582 +               page = alloc_page(gfp);
583 +               if (!page)
584 +                       goto err;
585 +               sg_set_page(sgl + i, page, PAGE_SIZE, 0);
586 +       }
587 +
588 +       return sgl;
589 +
590 +err:
591 +       sg_mark_end(sgl + i);
592 +       crypto_scomp_sg_free(sgl);
593 +       return NULL;
594 +}
595 +
596 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
597 +{
598 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
599 +       void **tfm_ctx = acomp_tfm_ctx(tfm);
600 +       struct crypto_scomp *scomp = *tfm_ctx;
601 +       void **ctx = acomp_request_ctx(req);
602 +       const int cpu = get_cpu();
603 +       u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
604 +       u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
605 +       int ret;
606 +
607 +       if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
608 +               ret = -EINVAL;
609 +               goto out;
610 +       }
611 +
612 +       if (req->dst && !req->dlen) {
613 +               ret = -EINVAL;
614 +               goto out;
615 +       }
616 +
617 +       if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
618 +               req->dlen = SCOMP_SCRATCH_SIZE;
619 +
620 +       scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
621 +       if (dir)
622 +               ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
623 +                                           scratch_dst, &req->dlen, *ctx);
624 +       else
625 +               ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
626 +                                             scratch_dst, &req->dlen, *ctx);
627 +       if (!ret) {
628 +               if (!req->dst) {
629 +                       req->dst = crypto_scomp_sg_alloc(req->dlen,
630 +                                  req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
631 +                                  GFP_KERNEL : GFP_ATOMIC);
632 +                       if (!req->dst)
633 +                               goto out;
634 +               }
635 +               scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
636 +                                        1);
637 +       }
638 +out:
639 +       put_cpu();
640 +       return ret;
641 +}
642 +
643 +static int scomp_acomp_compress(struct acomp_req *req)
644 +{
645 +       return scomp_acomp_comp_decomp(req, 1);
646 +}
647 +
648 +static int scomp_acomp_decompress(struct acomp_req *req)
649 +{
650 +       return scomp_acomp_comp_decomp(req, 0);
651 +}
652 +
653 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
654 +{
655 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
656 +
657 +       crypto_free_scomp(*ctx);
658 +}
659 +
660 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
661 +{
662 +       struct crypto_alg *calg = tfm->__crt_alg;
663 +       struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
664 +       struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
665 +       struct crypto_scomp *scomp;
666 +
667 +       if (!crypto_mod_get(calg))
668 +               return -EAGAIN;
669 +
670 +       scomp = crypto_create_tfm(calg, &crypto_scomp_type);
671 +       if (IS_ERR(scomp)) {
672 +               crypto_mod_put(calg);
673 +               return PTR_ERR(scomp);
674 +       }
675 +
676 +       *ctx = scomp;
677 +       tfm->exit = crypto_exit_scomp_ops_async;
678 +
679 +       crt->compress = scomp_acomp_compress;
680 +       crt->decompress = scomp_acomp_decompress;
681 +       crt->dst_free = crypto_scomp_sg_free;
682 +       crt->reqsize = sizeof(void *);
683 +
684 +       return 0;
685 +}
686 +
687 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
688 +{
689 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
690 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
691 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
692 +       struct crypto_scomp *scomp = *tfm_ctx;
693 +       void *ctx;
694 +
695 +       ctx = crypto_scomp_alloc_ctx(scomp);
696 +       if (IS_ERR(ctx)) {
697 +               kfree(req);
698 +               return NULL;
699 +       }
700 +
701 +       *req->__ctx = ctx;
702 +
703 +       return req;
704 +}
705 +
706 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
707 +{
708 +       struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
709 +       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
710 +       struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
711 +       struct crypto_scomp *scomp = *tfm_ctx;
712 +       void *ctx = *req->__ctx;
713 +
714 +       if (ctx)
715 +               crypto_scomp_free_ctx(scomp, ctx);
716 +}
717 +
718 +static const struct crypto_type crypto_scomp_type = {
719 +       .extsize = crypto_alg_extsize,
720 +       .init_tfm = crypto_scomp_init_tfm,
721 +#ifdef CONFIG_PROC_FS
722 +       .show = crypto_scomp_show,
723 +#endif
724 +       .report = crypto_scomp_report,
725 +       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
726 +       .maskset = CRYPTO_ALG_TYPE_MASK,
727 +       .type = CRYPTO_ALG_TYPE_SCOMPRESS,
728 +       .tfmsize = offsetof(struct crypto_scomp, base),
729 +};
730 +
731 +int crypto_register_scomp(struct scomp_alg *alg)
732 +{
733 +       struct crypto_alg *base = &alg->base;
734 +       int ret = -ENOMEM;
735 +
736 +       mutex_lock(&scomp_lock);
737 +       if (crypto_scomp_alloc_all_scratches())
738 +               goto error;
739 +
740 +       base->cra_type = &crypto_scomp_type;
741 +       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
742 +       base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
743 +
744 +       ret = crypto_register_alg(base);
745 +       if (ret)
746 +               goto error;
747 +
748 +       mutex_unlock(&scomp_lock);
749 +       return ret;
750 +
751 +error:
752 +       crypto_scomp_free_all_scratches();
753 +       mutex_unlock(&scomp_lock);
754 +       return ret;
755 +}
756 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
757 +
758 +int crypto_unregister_scomp(struct scomp_alg *alg)
759 +{
760 +       int ret;
761 +
762 +       mutex_lock(&scomp_lock);
763 +       ret = crypto_unregister_alg(&alg->base);
764 +       crypto_scomp_free_all_scratches();
765 +       mutex_unlock(&scomp_lock);
766 +
767 +       return ret;
768 +}
769 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
770 +
771 +MODULE_LICENSE("GPL");
772 +MODULE_DESCRIPTION("Synchronous compression type");
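
scompress.c is the glue that lets a simple synchronous compressor ride behind the acomp interface: crypto_init_scomp_ops_async() hooks an scomp instance under an acomp tfm, and scomp_acomp_comp_decomp() bounces the caller's scatterlists through the per-CPU scratch pages allocated at registration. All a backend supplies are the scomp_alg callbacks. A minimal sketch follows, not part of the patch and with purely illustrative names: the "compressor" only copies bytes, but it walks the same registration path a real driver would.

/*
 * Sketch: trivial scomp backend. "memcopy" performs no real compression;
 * every name here is illustrative and nothing below is part of the patch.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

static void *memcopy_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;	/* stateless: no per-request context needed */
}

static void memcopy_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int memcopy_copy(struct crypto_scomp *tfm, const u8 *src,
			unsigned int slen, u8 *dst, unsigned int *dlen,
			void *ctx)
{
	if (*dlen < slen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg memcopy_alg = {
	.alloc_ctx  = memcopy_alloc_ctx,
	.free_ctx   = memcopy_free_ctx,
	.compress   = memcopy_copy,
	.decompress = memcopy_copy,	/* copying is its own inverse */
	.base = {
		.cra_name     = "memcopy",
		.cra_priority = 100,
		.cra_module   = THIS_MODULE,
	},
};

static int __init memcopy_mod_init(void)
{
	return crypto_register_scomp(&memcopy_alg);
}

static void __exit memcopy_mod_exit(void)
{
	crypto_unregister_scomp(&memcopy_alg);
}

module_init(memcopy_mod_init);
module_exit(memcopy_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative scomp backend sketch");
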
773 --- a/crypto/tcrypt.c
774 +++ b/crypto/tcrypt.c
775 @@ -74,7 +74,7 @@ static char *check[] = {
776         "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
777         "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
778         "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
779 -       NULL
780 +       "rsa", NULL
781  };
782  
783  struct tcrypt_result {
784 @@ -1333,6 +1333,10 @@ static int do_test(const char *alg, u32
785                 ret += tcrypt_test("hmac(sha3-512)");
786                 break;
787  
788 +       case 115:
789 +               ret += tcrypt_test("rsa");
790 +               break;
791 +
792         case 150:
793                 ret += tcrypt_test("ansi_cprng");
794                 break;
795 @@ -1394,6 +1398,9 @@ static int do_test(const char *alg, u32
796         case 190:
797                 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
798                 break;
799 +       case 191:
800 +               ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
801 +               break;
802         case 200:
803                 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
804                                 speed_template_16_24_32);
805 @@ -1408,9 +1415,9 @@ static int do_test(const char *alg, u32
806                 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
807                                 speed_template_32_40_48);
808                 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
809 -                               speed_template_32_48_64);
810 +                               speed_template_32_64);
811                 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
812 -                               speed_template_32_48_64);
813 +                               speed_template_32_64);
814                 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
815                                 speed_template_16_24_32);
816                 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
817 @@ -1841,9 +1848,9 @@ static int do_test(const char *alg, u32
818                 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
819                                    speed_template_32_40_48);
820                 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
821 -                                  speed_template_32_48_64);
822 +                                  speed_template_32_64);
823                 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
824 -                                  speed_template_32_48_64);
825 +                                  speed_template_32_64);
826                 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
827                                    speed_template_16_24_32);
828                 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
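
Both new tcrypt cases are driven the usual way, by loading the module with a mode number on a kernel built with this patch: "modprobe tcrypt mode=115" runs the new "rsa" test and "modprobe tcrypt mode=191" the "tls10(hmac(sha1),cbc(aes))" vectors, assuming the respective algorithms are available. The xts(aes) speed runs switch from speed_template_32_48_64 to speed_template_32_64, dropping the 48-byte (twin AES-192) key size, presumably because the CAAM hardware implements only the AES-128 and AES-256 XTS cases.
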
829 --- a/crypto/testmgr.c
830 +++ b/crypto/testmgr.c
831 @@ -33,6 +33,7 @@
832  #include <crypto/drbg.h>
833  #include <crypto/akcipher.h>
834  #include <crypto/kpp.h>
835 +#include <crypto/acompress.h>
836  
837  #include "internal.h"
838  
839 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
840   */
841  #define IDX1           32
842  #define IDX2           32400
843 -#define IDX3           1
844 +#define IDX3           1511
845  #define IDX4           8193
846  #define IDX5           22222
847  #define IDX6           17101
848 @@ -82,47 +83,54 @@ struct tcrypt_result {
849  
850  struct aead_test_suite {
851         struct {
852 -               struct aead_testvec *vecs;
853 +               const struct aead_testvec *vecs;
854                 unsigned int count;
855         } enc, dec;
856  };
857  
858  struct cipher_test_suite {
859         struct {
860 -               struct cipher_testvec *vecs;
861 +               const struct cipher_testvec *vecs;
862                 unsigned int count;
863         } enc, dec;
864  };
865  
866  struct comp_test_suite {
867         struct {
868 -               struct comp_testvec *vecs;
869 +               const struct comp_testvec *vecs;
870                 unsigned int count;
871         } comp, decomp;
872  };
873  
874  struct hash_test_suite {
875 -       struct hash_testvec *vecs;
876 +       const struct hash_testvec *vecs;
877         unsigned int count;
878  };
879  
880  struct cprng_test_suite {
881 -       struct cprng_testvec *vecs;
882 +       const struct cprng_testvec *vecs;
883         unsigned int count;
884  };
885  
886  struct drbg_test_suite {
887 -       struct drbg_testvec *vecs;
888 +       const struct drbg_testvec *vecs;
889         unsigned int count;
890  };
891  
892 +struct tls_test_suite {
893 +       struct {
894 +               struct tls_testvec *vecs;
895 +               unsigned int count;
896 +       } enc, dec;
897 +};
898 +
899  struct akcipher_test_suite {
900 -       struct akcipher_testvec *vecs;
901 +       const struct akcipher_testvec *vecs;
902         unsigned int count;
903  };
904  
905  struct kpp_test_suite {
906 -       struct kpp_testvec *vecs;
907 +       const struct kpp_testvec *vecs;
908         unsigned int count;
909  };
910  
911 @@ -139,12 +147,14 @@ struct alg_test_desc {
912                 struct hash_test_suite hash;
913                 struct cprng_test_suite cprng;
914                 struct drbg_test_suite drbg;
915 +               struct tls_test_suite tls;
916                 struct akcipher_test_suite akcipher;
917                 struct kpp_test_suite kpp;
918         } suite;
919  };
920  
921 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
922 +static const unsigned int IDX[8] = {
923 +       IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
924  
925  static void hexdump(unsigned char *buf, unsigned int len)
926  {
927 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
928  }
929  
930  static int ahash_partial_update(struct ahash_request **preq,
931 -       struct crypto_ahash *tfm, struct hash_testvec *template,
932 +       struct crypto_ahash *tfm, const struct hash_testvec *template,
933         void *hash_buff, int k, int temp, struct scatterlist *sg,
934         const char *algo, char *result, struct tcrypt_result *tresult)
935  {
936 @@ -259,11 +269,12 @@ out_nostate:
937         return ret;
938  }
939  
940 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
941 -                      unsigned int tcount, bool use_digest,
942 -                      const int align_offset)
943 +static int __test_hash(struct crypto_ahash *tfm,
944 +                      const struct hash_testvec *template, unsigned int tcount,
945 +                      bool use_digest, const int align_offset)
946  {
947         const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
948 +       size_t digest_size = crypto_ahash_digestsize(tfm);
949         unsigned int i, j, k, temp;
950         struct scatterlist sg[8];
951         char *result;
952 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
953         char *xbuf[XBUFSIZE];
954         int ret = -ENOMEM;
955  
956 -       result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
957 +       result = kmalloc(digest_size, GFP_KERNEL);
958         if (!result)
959                 return ret;
960         key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
961 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
962                         goto out;
963  
964                 j++;
965 -               memset(result, 0, MAX_DIGEST_SIZE);
966 +               memset(result, 0, digest_size);
967  
968                 hash_buff = xbuf[0];
969                 hash_buff += align_offset;
970 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
971                         continue;
972  
973                 j++;
974 -               memset(result, 0, MAX_DIGEST_SIZE);
975 +               memset(result, 0, digest_size);
976  
977                 temp = 0;
978                 sg_init_table(sg, template[i].np);
979 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
980                         continue;
981  
982                 j++;
983 -               memset(result, 0, MAX_DIGEST_SIZE);
984 +               memset(result, 0, digest_size);
985  
986                 ret = -EINVAL;
987                 hash_buff = xbuf[0];
988 @@ -536,7 +547,8 @@ out_nobuf:
989         return ret;
990  }
991  
992 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
993 +static int test_hash(struct crypto_ahash *tfm,
994 +                    const struct hash_testvec *template,
995                      unsigned int tcount, bool use_digest)
996  {
997         unsigned int alignmask;
998 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
999  }
1000  
1001  static int __test_aead(struct crypto_aead *tfm, int enc,
1002 -                      struct aead_testvec *template, unsigned int tcount,
1003 +                      const struct aead_testvec *template, unsigned int tcount,
1004                        const bool diff_dst, const int align_offset)
1005  {
1006         const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1007 @@ -955,7 +967,7 @@ out_noxbuf:
1008  }
1009  
1010  static int test_aead(struct crypto_aead *tfm, int enc,
1011 -                    struct aead_testvec *template, unsigned int tcount)
1012 +                    const struct aead_testvec *template, unsigned int tcount)
1013  {
1014         unsigned int alignmask;
1015         int ret;
1016 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1017         return 0;
1018  }
1019  
1020 +static int __test_tls(struct crypto_aead *tfm, int enc,
1021 +                     struct tls_testvec *template, unsigned int tcount,
1022 +                     const bool diff_dst)
1023 +{
1024 +       const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1025 +       unsigned int i, k, authsize;
1026 +       char *q;
1027 +       struct aead_request *req;
1028 +       struct scatterlist *sg;
1029 +       struct scatterlist *sgout;
1030 +       const char *e, *d;
1031 +       struct tcrypt_result result;
1032 +       void *input;
1033 +       void *output;
1034 +       void *assoc;
1035 +       char *iv;
1036 +       char *key;
1037 +       char *xbuf[XBUFSIZE];
1038 +       char *xoutbuf[XBUFSIZE];
1039 +       char *axbuf[XBUFSIZE];
1040 +       int ret = -ENOMEM;
1041 +
1042 +       if (testmgr_alloc_buf(xbuf))
1043 +               goto out_noxbuf;
1044 +
1045 +       if (diff_dst && testmgr_alloc_buf(xoutbuf))
1046 +               goto out_nooutbuf;
1047 +
1048 +       if (testmgr_alloc_buf(axbuf))
1049 +               goto out_noaxbuf;
1050 +
1051 +       iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1052 +       if (!iv)
1053 +               goto out_noiv;
1054 +
1055 +       key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1056 +       if (!key)
1057 +               goto out_nokey;
1058 +
1059 +       sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1060 +       if (!sg)
1061 +               goto out_nosg;
1062 +
1063 +       sgout = sg + 8;
1064 +
1065 +       d = diff_dst ? "-ddst" : "";
1066 +       e = enc ? "encryption" : "decryption";
1067 +
1068 +       init_completion(&result.completion);
1069 +
1070 +       req = aead_request_alloc(tfm, GFP_KERNEL);
1071 +       if (!req) {
1072 +               pr_err("alg: tls%s: Failed to allocate request for %s\n",
1073 +                      d, algo);
1074 +               goto out;
1075 +       }
1076 +
1077 +       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1078 +                                 tcrypt_complete, &result);
1079 +
1080 +       for (i = 0; i < tcount; i++) {
1081 +               input = xbuf[0];
1082 +               assoc = axbuf[0];
1083 +
1084 +               ret = -EINVAL;
1085 +               if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1086 +                           template[i].alen > PAGE_SIZE))
1087 +                       goto out;
1088 +
1089 +               memcpy(assoc, template[i].assoc, template[i].alen);
1090 +               memcpy(input, template[i].input, template[i].ilen);
1091 +
1092 +               if (template[i].iv)
1093 +                       memcpy(iv, template[i].iv, MAX_IVLEN);
1094 +               else
1095 +                       memset(iv, 0, MAX_IVLEN);
1096 +
1097 +               crypto_aead_clear_flags(tfm, ~0);
1098 +
1099 +               if (template[i].klen > MAX_KEYLEN) {
1100 +                       pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1101 +                              d, i, algo, template[i].klen, MAX_KEYLEN);
1102 +                       ret = -EINVAL;
1103 +                       goto out;
1104 +               }
1105 +               memcpy(key, template[i].key, template[i].klen);
1106 +
1107 +               ret = crypto_aead_setkey(tfm, key, template[i].klen);
1108 +               if (!ret == template[i].fail) {
1109 +                       pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1110 +                              d, i, algo, crypto_aead_get_flags(tfm));
1111 +                       goto out;
1112 +               } else if (ret)
1113 +                       continue;
1114 +
1115 +               authsize = 20;
1116 +               ret = crypto_aead_setauthsize(tfm, authsize);
1117 +               if (ret) {
1118 +                       pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1119 +                              d, authsize, i, algo);
1120 +                       goto out;
1121 +               }
1122 +
1123 +               k = !!template[i].alen;
1124 +               sg_init_table(sg, k + 1);
1125 +               sg_set_buf(&sg[0], assoc, template[i].alen);
1126 +               sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1127 +                                          template[i].ilen));
1128 +               output = input;
1129 +
1130 +               if (diff_dst) {
1131 +                       sg_init_table(sgout, k + 1);
1132 +                       sg_set_buf(&sgout[0], assoc, template[i].alen);
1133 +
1134 +                       output = xoutbuf[0];
1135 +                       sg_set_buf(&sgout[k], output,
1136 +                                  (enc ? template[i].rlen : template[i].ilen));
1137 +               }
1138 +
1139 +               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1140 +                                      template[i].ilen, iv);
1141 +
1142 +               aead_request_set_ad(req, template[i].alen);
1143 +
1144 +               ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1145 +
1146 +               switch (ret) {
1147 +               case 0:
1148 +                       if (template[i].novrfy) {
1149 +                               /* verification was supposed to fail */
1150 +                               pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1151 +                                      d, e, i, algo);
1152 +                               /* so really, we got a bad message */
1153 +                               ret = -EBADMSG;
1154 +                               goto out;
1155 +                       }
1156 +                       break;
1157 +               case -EINPROGRESS:
1158 +               case -EBUSY:
1159 +                       wait_for_completion(&result.completion);
1160 +                       reinit_completion(&result.completion);
1161 +                       ret = result.err;
1162 +                       if (!ret)
1163 +                               break;
1164 +               case -EBADMSG:
1165 +                       /* verification failure was expected */
1166 +                       if (template[i].novrfy)
1167 +                               continue;
1168 +                       /* fall through */
1169 +               default:
1170 +                       pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1171 +                              d, e, i, algo, -ret);
1172 +                       goto out;
1173 +               }
1174 +
1175 +               q = output;
1176 +               if (memcmp(q, template[i].result, template[i].rlen)) {
1177 +                       pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1178 +                              d, i, e, algo);
1179 +                       hexdump(q, template[i].rlen);
1180 +                       pr_err("should be:\n");
1181 +                       hexdump(template[i].result, template[i].rlen);
1182 +                       ret = -EINVAL;
1183 +                       goto out;
1184 +               }
1185 +       }
1186 +
1187 +out:
1188 +       aead_request_free(req);
1189 +
1190 +       kfree(sg);
1191 +out_nosg:
1192 +       kfree(key);
1193 +out_nokey:
1194 +       kfree(iv);
1195 +out_noiv:
1196 +       testmgr_free_buf(axbuf);
1197 +out_noaxbuf:
1198 +       if (diff_dst)
1199 +               testmgr_free_buf(xoutbuf);
1200 +out_nooutbuf:
1201 +       testmgr_free_buf(xbuf);
1202 +out_noxbuf:
1203 +       return ret;
1204 +}
1205 +
1206 +static int test_tls(struct crypto_aead *tfm, int enc,
1207 +                   struct tls_testvec *template, unsigned int tcount)
1208 +{
1209 +       int ret;
1210 +       /* test 'dst == src' case */
1211 +       ret = __test_tls(tfm, enc, template, tcount, false);
1212 +       if (ret)
1213 +               return ret;
1214 +       /* test 'dst != src' case */
1215 +       return __test_tls(tfm, enc, template, tcount, true);
1216 +}
1217 +
1218 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1219 +                       u32 type, u32 mask)
1220 +{
1221 +       struct crypto_aead *tfm;
1222 +       int err = 0;
1223 +
1224 +       tfm = crypto_alloc_aead(driver, type, mask);
1225 +       if (IS_ERR(tfm)) {
1226 +               pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1227 +                      driver, PTR_ERR(tfm));
1228 +               return PTR_ERR(tfm);
1229 +       }
1230 +
1231 +       if (desc->suite.tls.enc.vecs) {
1232 +               err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1233 +                              desc->suite.tls.enc.count);
1234 +               if (err)
1235 +                       goto out;
1236 +       }
1237 +
1238 +       if (!err && desc->suite.tls.dec.vecs)
1239 +               err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1240 +                              desc->suite.tls.dec.count);
1241 +
1242 +out:
1243 +       crypto_free_aead(tfm);
1244 +       return err;
1245 +}
1246 +
1247  static int test_cipher(struct crypto_cipher *tfm, int enc,
1248 -                      struct cipher_testvec *template, unsigned int tcount)
1249 +                      const struct cipher_testvec *template,
1250 +                      unsigned int tcount)
1251  {
1252         const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1253         unsigned int i, j, k;
1254 @@ -1066,7 +1306,8 @@ out_nobuf:
1255  }
1256  
1257  static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1258 -                          struct cipher_testvec *template, unsigned int tcount,
1259 +                          const struct cipher_testvec *template,
1260 +                          unsigned int tcount,
1261                            const bool diff_dst, const int align_offset)
1262  {
1263         const char *algo =
1264 @@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
1265         const char *e, *d;
1266         struct tcrypt_result result;
1267         void *data;
1268 -       char iv[MAX_IVLEN];
1269 +       char *iv;
1270         char *xbuf[XBUFSIZE];
1271         char *xoutbuf[XBUFSIZE];
1272         int ret = -ENOMEM;
1273         unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1274  
1275 +       iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
1276 +       if (!iv)
1277 +               return ret;
1278 +
1279         if (testmgr_alloc_buf(xbuf))
1280                 goto out_nobuf;
1281  
1282 @@ -1325,12 +1570,14 @@ out:
1283                 testmgr_free_buf(xoutbuf);
1284  out_nooutbuf:
1285         testmgr_free_buf(xbuf);
1286 +       kfree(iv);
1287  out_nobuf:
1288         return ret;
1289  }
1290  
1291  static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1292 -                        struct cipher_testvec *template, unsigned int tcount)
1293 +                        const struct cipher_testvec *template,
1294 +                        unsigned int tcount)
1295  {
1296         unsigned int alignmask;
1297         int ret;
1298 @@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
1299         return 0;
1300  }
1301  
1302 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1303 -                    struct comp_testvec *dtemplate, int ctcount, int dtcount)
1304 +static int test_comp(struct crypto_comp *tfm,
1305 +                    const struct comp_testvec *ctemplate,
1306 +                    const struct comp_testvec *dtemplate,
1307 +                    int ctcount, int dtcount)
1308  {
1309         const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1310         unsigned int i;
1311 @@ -1442,7 +1691,154 @@ out:
1312         return ret;
1313  }
1314  
1315 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1316 +static int test_acomp(struct crypto_acomp *tfm,
1317 +                     const struct comp_testvec *ctemplate,
1318 +                     const struct comp_testvec *dtemplate,
1319 +                     int ctcount, int dtcount)
1320 +{
1321 +       const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1322 +       unsigned int i;
1323 +       char *output;
1324 +       int ret;
1325 +       struct scatterlist src, dst;
1326 +       struct acomp_req *req;
1327 +       struct tcrypt_result result;
1328 +
1329 +       output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1330 +       if (!output)
1331 +               return -ENOMEM;
1332 +
1333 +       for (i = 0; i < ctcount; i++) {
1334 +               unsigned int dlen = COMP_BUF_SIZE;
1335 +               int ilen = ctemplate[i].inlen;
1336 +               void *input_vec;
1337 +
1338 +               input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1339 +               if (!input_vec) {
1340 +                       ret = -ENOMEM;
1341 +                       goto out;
1342 +               }
1343 +
1344 +               memset(output, 0, dlen);
1345 +               init_completion(&result.completion);
1346 +               sg_init_one(&src, input_vec, ilen);
1347 +               sg_init_one(&dst, output, dlen);
1348 +
1349 +               req = acomp_request_alloc(tfm);
1350 +               if (!req) {
1351 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1352 +                              algo);
1353 +                       kfree(input_vec);
1354 +                       ret = -ENOMEM;
1355 +                       goto out;
1356 +               }
1357 +
1358 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1359 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1360 +                                          tcrypt_complete, &result);
1361 +
1362 +               ret = wait_async_op(&result, crypto_acomp_compress(req));
1363 +               if (ret) {
1364 +                       pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1365 +                              i + 1, algo, -ret);
1366 +                       kfree(input_vec);
1367 +                       acomp_request_free(req);
1368 +                       goto out;
1369 +               }
1370 +
1371 +               if (req->dlen != ctemplate[i].outlen) {
1372 +                       pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1373 +                              i + 1, algo, req->dlen);
1374 +                       ret = -EINVAL;
1375 +                       kfree(input_vec);
1376 +                       acomp_request_free(req);
1377 +                       goto out;
1378 +               }
1379 +
1380 +               if (memcmp(output, ctemplate[i].output, req->dlen)) {
1381 +                       pr_err("alg: acomp: Compression test %d failed for %s\n",
1382 +                              i + 1, algo);
1383 +                       hexdump(output, req->dlen);
1384 +                       ret = -EINVAL;
1385 +                       kfree(input_vec);
1386 +                       acomp_request_free(req);
1387 +                       goto out;
1388 +               }
1389 +
1390 +               kfree(input_vec);
1391 +               acomp_request_free(req);
1392 +       }
1393 +
1394 +       for (i = 0; i < dtcount; i++) {
1395 +               unsigned int dlen = COMP_BUF_SIZE;
1396 +               int ilen = dtemplate[i].inlen;
1397 +               void *input_vec;
1398 +
1399 +               input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1400 +               if (!input_vec) {
1401 +                       ret = -ENOMEM;
1402 +                       goto out;
1403 +               }
1404 +
1405 +               memset(output, 0, dlen);
1406 +               init_completion(&result.completion);
1407 +               sg_init_one(&src, input_vec, ilen);
1408 +               sg_init_one(&dst, output, dlen);
1409 +
1410 +               req = acomp_request_alloc(tfm);
1411 +               if (!req) {
1412 +                       pr_err("alg: acomp: request alloc failed for %s\n",
1413 +                              algo);
1414 +                       kfree(input_vec);
1415 +                       ret = -ENOMEM;
1416 +                       goto out;
1417 +               }
1418 +
1419 +               acomp_request_set_params(req, &src, &dst, ilen, dlen);
1420 +               acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1421 +                                          tcrypt_complete, &result);
1422 +
1423 +               ret = wait_async_op(&result, crypto_acomp_decompress(req));
1424 +               if (ret) {
1425 +                       pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1426 +                              i + 1, algo, -ret);
1427 +                       kfree(input_vec);
1428 +                       acomp_request_free(req);
1429 +                       goto out;
1430 +               }
1431 +
1432 +               if (req->dlen != dtemplate[i].outlen) {
1433 +                       pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1434 +                              i + 1, algo, req->dlen);
1435 +                       ret = -EINVAL;
1436 +                       kfree(input_vec);
1437 +                       acomp_request_free(req);
1438 +                       goto out;
1439 +               }
1440 +
1441 +               if (memcmp(output, dtemplate[i].output, req->dlen)) {
1442 +                       pr_err("alg: acomp: Decompression test %d failed for %s\n",
1443 +                              i + 1, algo);
1444 +                       hexdump(output, req->dlen);
1445 +                       ret = -EINVAL;
1446 +                       kfree(input_vec);
1447 +                       acomp_request_free(req);
1448 +                       goto out;
1449 +               }
1450 +
1451 +               kfree(input_vec);
1452 +               acomp_request_free(req);
1453 +       }
1454 +
1455 +       ret = 0;
1456 +
1457 +out:
1458 +       kfree(output);
1459 +       return ret;
1460 +}
1461 +
1462 +static int test_cprng(struct crypto_rng *tfm,
1463 +                     const struct cprng_testvec *template,
1464                       unsigned int tcount)
1465  {
1466         const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1467 @@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
1468         struct crypto_aead *tfm;
1469         int err = 0;
1470  
1471 -       tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1472 +       tfm = crypto_alloc_aead(driver, type, mask);
1473         if (IS_ERR(tfm)) {
1474                 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1475                        "%ld\n", driver, PTR_ERR(tfm));
1476 @@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
1477         struct crypto_cipher *tfm;
1478         int err = 0;
1479  
1480 -       tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1481 +       tfm = crypto_alloc_cipher(driver, type, mask);
1482         if (IS_ERR(tfm)) {
1483                 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1484                        "%s: %ld\n", driver, PTR_ERR(tfm));
1485 @@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
1486         struct crypto_skcipher *tfm;
1487         int err = 0;
1488  
1489 -       tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1490 +       tfm = crypto_alloc_skcipher(driver, type, mask);
1491         if (IS_ERR(tfm)) {
1492                 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1493                        "%s: %ld\n", driver, PTR_ERR(tfm));
1494 @@ -1593,22 +1989,38 @@ out:
1495  static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1496                          u32 type, u32 mask)
1497  {
1498 -       struct crypto_comp *tfm;
1499 +       struct crypto_comp *comp;
1500 +       struct crypto_acomp *acomp;
1501         int err;
1502 +       u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1503  
1504 -       tfm = crypto_alloc_comp(driver, type, mask);
1505 -       if (IS_ERR(tfm)) {
1506 -               printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1507 -                      "%ld\n", driver, PTR_ERR(tfm));
1508 -               return PTR_ERR(tfm);
1509 -       }
1510 +       if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1511 +               acomp = crypto_alloc_acomp(driver, type, mask);
1512 +               if (IS_ERR(acomp)) {
1513 +                       pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1514 +                              driver, PTR_ERR(acomp));
1515 +                       return PTR_ERR(acomp);
1516 +               }
1517 +               err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1518 +                                desc->suite.comp.decomp.vecs,
1519 +                                desc->suite.comp.comp.count,
1520 +                                desc->suite.comp.decomp.count);
1521 +               crypto_free_acomp(acomp);
1522 +       } else {
1523 +               comp = crypto_alloc_comp(driver, type, mask);
1524 +               if (IS_ERR(comp)) {
1525 +                       pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1526 +                              driver, PTR_ERR(comp));
1527 +                       return PTR_ERR(comp);
1528 +               }
1529  
1530 -       err = test_comp(tfm, desc->suite.comp.comp.vecs,
1531 -                       desc->suite.comp.decomp.vecs,
1532 -                       desc->suite.comp.comp.count,
1533 -                       desc->suite.comp.decomp.count);
1534 +               err = test_comp(comp, desc->suite.comp.comp.vecs,
1535 +                               desc->suite.comp.decomp.vecs,
1536 +                               desc->suite.comp.comp.count,
1537 +                               desc->suite.comp.decomp.count);
1538  
1539 -       crypto_free_comp(tfm);
1540 +               crypto_free_comp(comp);
1541 +       }
1542         return err;
1543  }
1544  
1545 @@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
1546         struct crypto_ahash *tfm;
1547         int err;
1548  
1549 -       tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1550 +       tfm = crypto_alloc_ahash(driver, type, mask);
1551         if (IS_ERR(tfm)) {
1552                 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1553                        "%ld\n", driver, PTR_ERR(tfm));
1554 @@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
1555         if (err)
1556                 goto out;
1557  
1558 -       tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1559 +       tfm = crypto_alloc_shash(driver, type, mask);
1560         if (IS_ERR(tfm)) {
1561                 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1562                        "%ld\n", driver, PTR_ERR(tfm));
1563 @@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
1564         struct crypto_rng *rng;
1565         int err;
1566  
1567 -       rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1568 +       rng = crypto_alloc_rng(driver, type, mask);
1569         if (IS_ERR(rng)) {
1570                 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1571                        "%ld\n", driver, PTR_ERR(rng));
1572 @@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
1573  }
1574  
1575  
1576 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1577 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1578                           const char *driver, u32 type, u32 mask)
1579  {
1580         int ret = -EAGAIN;
1581 @@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
1582         if (!buf)
1583                 return -ENOMEM;
1584  
1585 -       drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1586 +       drng = crypto_alloc_rng(driver, type, mask);
1587         if (IS_ERR(drng)) {
1588                 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1589                        "%s\n", driver);
1590 @@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
1591         int err = 0;
1592         int pr = 0;
1593         int i = 0;
1594 -       struct drbg_testvec *template = desc->suite.drbg.vecs;
1595 +       const struct drbg_testvec *template = desc->suite.drbg.vecs;
1596         unsigned int tcount = desc->suite.drbg.count;
1597  
1598         if (0 == memcmp(driver, "drbg_pr_", 8))
1599 @@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
1600  
1601  }
1602  
1603 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1604 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1605                        const char *alg)
1606  {
1607         struct kpp_request *req;
1608 @@ -1888,7 +2300,7 @@ free_req:
1609  }
1610  
1611  static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1612 -                   struct kpp_testvec *vecs, unsigned int tcount)
1613 +                   const struct kpp_testvec *vecs, unsigned int tcount)
1614  {
1615         int ret, i;
1616  
1617 @@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
1618         struct crypto_kpp *tfm;
1619         int err = 0;
1620  
1621 -       tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1622 +       tfm = crypto_alloc_kpp(driver, type, mask);
1623         if (IS_ERR(tfm)) {
1624                 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1625                        driver, PTR_ERR(tfm));
1626 @@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
1627  }
1628  
1629  static int test_akcipher_one(struct crypto_akcipher *tfm,
1630 -                            struct akcipher_testvec *vecs)
1631 +                            const struct akcipher_testvec *vecs)
1632  {
1633         char *xbuf[XBUFSIZE];
1634         struct akcipher_request *req;
1635 @@ -2044,7 +2456,8 @@ free_xbuf:
1636  }
1637  
1638  static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1639 -                        struct akcipher_testvec *vecs, unsigned int tcount)
1640 +                        const struct akcipher_testvec *vecs,
1641 +                        unsigned int tcount)
1642  {
1643         const char *algo =
1644                 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1645 @@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
1646         struct crypto_akcipher *tfm;
1647         int err = 0;
1648  
1649 -       tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1650 +       tfm = crypto_alloc_akcipher(driver, type, mask);
1651         if (IS_ERR(tfm)) {
1652                 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1653                        driver, PTR_ERR(tfm));
1654 @@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
1655         return 0;
1656  }
1657  
1658 +#define __VECS(tv)     { .vecs = tv, .count = ARRAY_SIZE(tv) }
1659 +
1660  /* Please keep this list sorted by algorithm name. */
1661  static const struct alg_test_desc alg_test_descs[] = {
1662         {
1663 -               .alg = "__cbc-cast5-avx",
1664 -               .test = alg_test_null,
1665 -       }, {
1666 -               .alg = "__cbc-cast6-avx",
1667 -               .test = alg_test_null,
1668 -       }, {
1669 -               .alg = "__cbc-serpent-avx",
1670 -               .test = alg_test_null,
1671 -       }, {
1672 -               .alg = "__cbc-serpent-avx2",
1673 -               .test = alg_test_null,
1674 -       }, {
1675 -               .alg = "__cbc-serpent-sse2",
1676 -               .test = alg_test_null,
1677 -       }, {
1678 -               .alg = "__cbc-twofish-avx",
1679 -               .test = alg_test_null,
1680 -       }, {
1681 -               .alg = "__driver-cbc-aes-aesni",
1682 -               .test = alg_test_null,
1683 -               .fips_allowed = 1,
1684 -       }, {
1685 -               .alg = "__driver-cbc-camellia-aesni",
1686 -               .test = alg_test_null,
1687 -       }, {
1688 -               .alg = "__driver-cbc-camellia-aesni-avx2",
1689 -               .test = alg_test_null,
1690 -       }, {
1691 -               .alg = "__driver-cbc-cast5-avx",
1692 -               .test = alg_test_null,
1693 -       }, {
1694 -               .alg = "__driver-cbc-cast6-avx",
1695 -               .test = alg_test_null,
1696 -       }, {
1697 -               .alg = "__driver-cbc-serpent-avx",
1698 -               .test = alg_test_null,
1699 -       }, {
1700 -               .alg = "__driver-cbc-serpent-avx2",
1701 -               .test = alg_test_null,
1702 -       }, {
1703 -               .alg = "__driver-cbc-serpent-sse2",
1704 -               .test = alg_test_null,
1705 -       }, {
1706 -               .alg = "__driver-cbc-twofish-avx",
1707 -               .test = alg_test_null,
1708 -       }, {
1709 -               .alg = "__driver-ecb-aes-aesni",
1710 -               .test = alg_test_null,
1711 -               .fips_allowed = 1,
1712 -       }, {
1713 -               .alg = "__driver-ecb-camellia-aesni",
1714 -               .test = alg_test_null,
1715 -       }, {
1716 -               .alg = "__driver-ecb-camellia-aesni-avx2",
1717 -               .test = alg_test_null,
1718 -       }, {
1719 -               .alg = "__driver-ecb-cast5-avx",
1720 -               .test = alg_test_null,
1721 -       }, {
1722 -               .alg = "__driver-ecb-cast6-avx",
1723 -               .test = alg_test_null,
1724 -       }, {
1725 -               .alg = "__driver-ecb-serpent-avx",
1726 -               .test = alg_test_null,
1727 -       }, {
1728 -               .alg = "__driver-ecb-serpent-avx2",
1729 -               .test = alg_test_null,
1730 -       }, {
1731 -               .alg = "__driver-ecb-serpent-sse2",
1732 -               .test = alg_test_null,
1733 -       }, {
1734 -               .alg = "__driver-ecb-twofish-avx",
1735 -               .test = alg_test_null,
1736 -       }, {
1737 -               .alg = "__driver-gcm-aes-aesni",
1738 -               .test = alg_test_null,
1739 -               .fips_allowed = 1,
1740 -       }, {
1741 -               .alg = "__ghash-pclmulqdqni",
1742 -               .test = alg_test_null,
1743 -               .fips_allowed = 1,
1744 -       }, {
1745                 .alg = "ansi_cprng",
1746                 .test = alg_test_cprng,
1747                 .suite = {
1748 -                       .cprng = {
1749 -                               .vecs = ansi_cprng_aes_tv_template,
1750 -                               .count = ANSI_CPRNG_AES_TEST_VECTORS
1751 -                       }
1752 +                       .cprng = __VECS(ansi_cprng_aes_tv_template)
1753                 }
1754         }, {
1755                 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1756                 .test = alg_test_aead,
1757                 .suite = {
1758                         .aead = {
1759 -                               .enc = {
1760 -                                       .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1761 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1762 -                               },
1763 -                               .dec = {
1764 -                                       .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1765 -                                       .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1766 -                               }
1767 +                               .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1768 +                               .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1769                         }
1770                 }
1771         }, {
1772 @@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
1773                 .test = alg_test_aead,
1774                 .suite = {
1775                         .aead = {
1776 -                               .enc = {
1777 -                                       .vecs =
1778 -                                       hmac_sha1_aes_cbc_enc_tv_temp,
1779 -                                       .count =
1780 -                                       HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1781 -                               }
1782 +                               .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1783                         }
1784                 }
1785         }, {
1786 @@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
1787                 .test = alg_test_aead,
1788                 .suite = {
1789                         .aead = {
1790 -                               .enc = {
1791 -                                       .vecs =
1792 -                                       hmac_sha1_des_cbc_enc_tv_temp,
1793 -                                       .count =
1794 -                                       HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1795 -                               }
1796 +                               .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1797                         }
1798                 }
1799         }, {
1800 @@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
1801                 .fips_allowed = 1,
1802                 .suite = {
1803                         .aead = {
1804 -                               .enc = {
1805 -                                       .vecs =
1806 -                                       hmac_sha1_des3_ede_cbc_enc_tv_temp,
1807 -                                       .count =
1808 -                                       HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1809 -                               }
1810 +                               .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1811                         }
1812                 }
1813         }, {
1814 @@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
1815                 .test = alg_test_aead,
1816                 .suite = {
1817                         .aead = {
1818 -                               .enc = {
1819 -                                       .vecs =
1820 -                                       hmac_sha1_ecb_cipher_null_enc_tv_temp,
1821 -                                       .count =
1822 -                                       HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1823 -                               },
1824 -                               .dec = {
1825 -                                       .vecs =
1826 -                                       hmac_sha1_ecb_cipher_null_dec_tv_temp,
1827 -                                       .count =
1828 -                                       HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1829 -                               }
1830 +                               .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1831 +                               .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1832                         }
1833                 }
1834         }, {
1835 @@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
1836                 .test = alg_test_aead,
1837                 .suite = {
1838                         .aead = {
1839 -                               .enc = {
1840 -                                       .vecs =
1841 -                                       hmac_sha224_des_cbc_enc_tv_temp,
1842 -                                       .count =
1843 -                                       HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1844 -                               }
1845 +                               .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1846                         }
1847                 }
1848         }, {
1849 @@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
1850                 .fips_allowed = 1,
1851                 .suite = {
1852                         .aead = {
1853 -                               .enc = {
1854 -                                       .vecs =
1855 -                                       hmac_sha224_des3_ede_cbc_enc_tv_temp,
1856 -                                       .count =
1857 -                                       HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1858 -                               }
1859 +                               .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1860                         }
1861                 }
1862         }, {
1863 @@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
1864                 .fips_allowed = 1,
1865                 .suite = {
1866                         .aead = {
1867 -                               .enc = {
1868 -                                       .vecs =
1869 -                                       hmac_sha256_aes_cbc_enc_tv_temp,
1870 -                                       .count =
1871 -                                       HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1872 -                               }
1873 +                               .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1874                         }
1875                 }
1876         }, {
1877 @@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
1878                 .test = alg_test_aead,
1879                 .suite = {
1880                         .aead = {
1881 -                               .enc = {
1882 -                                       .vecs =
1883 -                                       hmac_sha256_des_cbc_enc_tv_temp,
1884 -                                       .count =
1885 -                                       HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1886 -                               }
1887 +                               .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1888                         }
1889                 }
1890         }, {
1891 @@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
1892                 .fips_allowed = 1,
1893                 .suite = {
1894                         .aead = {
1895 -                               .enc = {
1896 -                                       .vecs =
1897 -                                       hmac_sha256_des3_ede_cbc_enc_tv_temp,
1898 -                                       .count =
1899 -                                       HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1900 -                               }
1901 +                               .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1902                         }
1903                 }
1904         }, {
1905 @@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
1906                 .test = alg_test_aead,
1907                 .suite = {
1908                         .aead = {
1909 -                               .enc = {
1910 -                                       .vecs =
1911 -                                       hmac_sha384_des_cbc_enc_tv_temp,
1912 -                                       .count =
1913 -                                       HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1914 -                               }
1915 +                               .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1916                         }
1917                 }
1918         }, {
1919 @@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
1920                 .fips_allowed = 1,
1921                 .suite = {
1922                         .aead = {
1923 -                               .enc = {
1924 -                                       .vecs =
1925 -                                       hmac_sha384_des3_ede_cbc_enc_tv_temp,
1926 -                                       .count =
1927 -                                       HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1928 -                               }
1929 +                               .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1930                         }
1931                 }
1932         }, {
1933 @@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
1934                 .test = alg_test_aead,
1935                 .suite = {
1936                         .aead = {
1937 -                               .enc = {
1938 -                                       .vecs =
1939 -                                       hmac_sha512_aes_cbc_enc_tv_temp,
1940 -                                       .count =
1941 -                                       HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1942 -                               }
1943 +                               .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1944                         }
1945                 }
1946         }, {
1947 @@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
1948                 .test = alg_test_aead,
1949                 .suite = {
1950                         .aead = {
1951 -                               .enc = {
1952 -                                       .vecs =
1953 -                                       hmac_sha512_des_cbc_enc_tv_temp,
1954 -                                       .count =
1955 -                                       HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1956 -                               }
1957 +                               .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1958                         }
1959                 }
1960         }, {
1961 @@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
1962                 .fips_allowed = 1,
1963                 .suite = {
1964                         .aead = {
1965 -                               .enc = {
1966 -                                       .vecs =
1967 -                                       hmac_sha512_des3_ede_cbc_enc_tv_temp,
1968 -                                       .count =
1969 -                                       HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1970 -                               }
1971 +                               .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1972                         }
1973                 }
1974         }, {
1975 @@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
1976                 .fips_allowed = 1,
1977                 .suite = {
1978                         .cipher = {
1979 -                               .enc = {
1980 -                                       .vecs = aes_cbc_enc_tv_template,
1981 -                                       .count = AES_CBC_ENC_TEST_VECTORS
1982 -                               },
1983 -                               .dec = {
1984 -                                       .vecs = aes_cbc_dec_tv_template,
1985 -                                       .count = AES_CBC_DEC_TEST_VECTORS
1986 -                               }
1987 +                               .enc = __VECS(aes_cbc_enc_tv_template),
1988 +                               .dec = __VECS(aes_cbc_dec_tv_template)
1989                         }
1990                 }
1991         }, {
1992 @@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
1993                 .test = alg_test_skcipher,
1994                 .suite = {
1995                         .cipher = {
1996 -                               .enc = {
1997 -                                       .vecs = anubis_cbc_enc_tv_template,
1998 -                                       .count = ANUBIS_CBC_ENC_TEST_VECTORS
1999 -                               },
2000 -                               .dec = {
2001 -                                       .vecs = anubis_cbc_dec_tv_template,
2002 -                                       .count = ANUBIS_CBC_DEC_TEST_VECTORS
2003 -                               }
2004 +                               .enc = __VECS(anubis_cbc_enc_tv_template),
2005 +                               .dec = __VECS(anubis_cbc_dec_tv_template)
2006                         }
2007                 }
2008         }, {
2009 @@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
2010                 .test = alg_test_skcipher,
2011                 .suite = {
2012                         .cipher = {
2013 -                               .enc = {
2014 -                                       .vecs = bf_cbc_enc_tv_template,
2015 -                                       .count = BF_CBC_ENC_TEST_VECTORS
2016 -                               },
2017 -                               .dec = {
2018 -                                       .vecs = bf_cbc_dec_tv_template,
2019 -                                       .count = BF_CBC_DEC_TEST_VECTORS
2020 -                               }
2021 +                               .enc = __VECS(bf_cbc_enc_tv_template),
2022 +                               .dec = __VECS(bf_cbc_dec_tv_template)
2023                         }
2024                 }
2025         }, {
2026 @@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
2027                 .test = alg_test_skcipher,
2028                 .suite = {
2029                         .cipher = {
2030 -                               .enc = {
2031 -                                       .vecs = camellia_cbc_enc_tv_template,
2032 -                                       .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2033 -                               },
2034 -                               .dec = {
2035 -                                       .vecs = camellia_cbc_dec_tv_template,
2036 -                                       .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2037 -                               }
2038 +                               .enc = __VECS(camellia_cbc_enc_tv_template),
2039 +                               .dec = __VECS(camellia_cbc_dec_tv_template)
2040                         }
2041                 }
2042         }, {
2043 @@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
2044                 .test = alg_test_skcipher,
2045                 .suite = {
2046                         .cipher = {
2047 -                               .enc = {
2048 -                                       .vecs = cast5_cbc_enc_tv_template,
2049 -                                       .count = CAST5_CBC_ENC_TEST_VECTORS
2050 -                               },
2051 -                               .dec = {
2052 -                                       .vecs = cast5_cbc_dec_tv_template,
2053 -                                       .count = CAST5_CBC_DEC_TEST_VECTORS
2054 -                               }
2055 +                               .enc = __VECS(cast5_cbc_enc_tv_template),
2056 +                               .dec = __VECS(cast5_cbc_dec_tv_template)
2057                         }
2058                 }
2059         }, {
2060 @@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
2061                 .test = alg_test_skcipher,
2062                 .suite = {
2063                         .cipher = {
2064 -                               .enc = {
2065 -                                       .vecs = cast6_cbc_enc_tv_template,
2066 -                                       .count = CAST6_CBC_ENC_TEST_VECTORS
2067 -                               },
2068 -                               .dec = {
2069 -                                       .vecs = cast6_cbc_dec_tv_template,
2070 -                                       .count = CAST6_CBC_DEC_TEST_VECTORS
2071 -                               }
2072 +                               .enc = __VECS(cast6_cbc_enc_tv_template),
2073 +                               .dec = __VECS(cast6_cbc_dec_tv_template)
2074                         }
2075                 }
2076         }, {
2077 @@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
2078                 .test = alg_test_skcipher,
2079                 .suite = {
2080                         .cipher = {
2081 -                               .enc = {
2082 -                                       .vecs = des_cbc_enc_tv_template,
2083 -                                       .count = DES_CBC_ENC_TEST_VECTORS
2084 -                               },
2085 -                               .dec = {
2086 -                                       .vecs = des_cbc_dec_tv_template,
2087 -                                       .count = DES_CBC_DEC_TEST_VECTORS
2088 -                               }
2089 +                               .enc = __VECS(des_cbc_enc_tv_template),
2090 +                               .dec = __VECS(des_cbc_dec_tv_template)
2091                         }
2092                 }
2093         }, {
2094 @@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
2095                 .fips_allowed = 1,
2096                 .suite = {
2097                         .cipher = {
2098 -                               .enc = {
2099 -                                       .vecs = des3_ede_cbc_enc_tv_template,
2100 -                                       .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2101 -                               },
2102 -                               .dec = {
2103 -                                       .vecs = des3_ede_cbc_dec_tv_template,
2104 -                                       .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2105 -                               }
2106 +                               .enc = __VECS(des3_ede_cbc_enc_tv_template),
2107 +                               .dec = __VECS(des3_ede_cbc_dec_tv_template)
2108                         }
2109                 }
2110         }, {
2111 @@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
2112                 .test = alg_test_skcipher,
2113                 .suite = {
2114                         .cipher = {
2115 -                               .enc = {
2116 -                                       .vecs = serpent_cbc_enc_tv_template,
2117 -                                       .count = SERPENT_CBC_ENC_TEST_VECTORS
2118 -                               },
2119 -                               .dec = {
2120 -                                       .vecs = serpent_cbc_dec_tv_template,
2121 -                                       .count = SERPENT_CBC_DEC_TEST_VECTORS
2122 -                               }
2123 +                               .enc = __VECS(serpent_cbc_enc_tv_template),
2124 +                               .dec = __VECS(serpent_cbc_dec_tv_template)
2125                         }
2126                 }
2127         }, {
2128 @@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
2129                 .test = alg_test_skcipher,
2130                 .suite = {
2131                         .cipher = {
2132 -                               .enc = {
2133 -                                       .vecs = tf_cbc_enc_tv_template,
2134 -                                       .count = TF_CBC_ENC_TEST_VECTORS
2135 -                               },
2136 -                               .dec = {
2137 -                                       .vecs = tf_cbc_dec_tv_template,
2138 -                                       .count = TF_CBC_DEC_TEST_VECTORS
2139 -                               }
2140 +                               .enc = __VECS(tf_cbc_enc_tv_template),
2141 +                               .dec = __VECS(tf_cbc_dec_tv_template)
2142                         }
2143                 }
2144         }, {
2145 +               .alg = "cbcmac(aes)",
2146 +               .fips_allowed = 1,
2147 +               .test = alg_test_hash,
2148 +               .suite = {
2149 +                       .hash = __VECS(aes_cbcmac_tv_template)
2150 +               }
2151 +       }, {
2152                 .alg = "ccm(aes)",
2153                 .test = alg_test_aead,
2154                 .fips_allowed = 1,
2155                 .suite = {
2156                         .aead = {
2157 -                               .enc = {
2158 -                                       .vecs = aes_ccm_enc_tv_template,
2159 -                                       .count = AES_CCM_ENC_TEST_VECTORS
2160 -                               },
2161 -                               .dec = {
2162 -                                       .vecs = aes_ccm_dec_tv_template,
2163 -                                       .count = AES_CCM_DEC_TEST_VECTORS
2164 -                               }
2165 +                               .enc = __VECS(aes_ccm_enc_tv_template),
2166 +                               .dec = __VECS(aes_ccm_dec_tv_template)
2167                         }
2168                 }
2169         }, {
2170 @@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
2171                 .test = alg_test_skcipher,
2172                 .suite = {
2173                         .cipher = {
2174 -                               .enc = {
2175 -                                       .vecs = chacha20_enc_tv_template,
2176 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2177 -                               },
2178 -                               .dec = {
2179 -                                       .vecs = chacha20_enc_tv_template,
2180 -                                       .count = CHACHA20_ENC_TEST_VECTORS
2181 -                               },
2182 +                               .enc = __VECS(chacha20_enc_tv_template),
2183 +                               .dec = __VECS(chacha20_enc_tv_template),
2184                         }
2185                 }
2186         }, {
2187 @@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
2188                 .fips_allowed = 1,
2189                 .test = alg_test_hash,
2190                 .suite = {
2191 -                       .hash = {
2192 -                               .vecs = aes_cmac128_tv_template,
2193 -                               .count = CMAC_AES_TEST_VECTORS
2194 -                       }
2195 +                       .hash = __VECS(aes_cmac128_tv_template)
2196                 }
2197         }, {
2198                 .alg = "cmac(des3_ede)",
2199                 .fips_allowed = 1,
2200                 .test = alg_test_hash,
2201                 .suite = {
2202 -                       .hash = {
2203 -                               .vecs = des3_ede_cmac64_tv_template,
2204 -                               .count = CMAC_DES3_EDE_TEST_VECTORS
2205 -                       }
2206 +                       .hash = __VECS(des3_ede_cmac64_tv_template)
2207                 }
2208         }, {
2209                 .alg = "compress_null",
2210 @@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
2211                 .alg = "crc32",
2212                 .test = alg_test_hash,
2213                 .suite = {
2214 -                       .hash = {
2215 -                               .vecs = crc32_tv_template,
2216 -                               .count = CRC32_TEST_VECTORS
2217 -                       }
2218 +                       .hash = __VECS(crc32_tv_template)
2219                 }
2220         }, {
2221                 .alg = "crc32c",
2222                 .test = alg_test_crc32c,
2223                 .fips_allowed = 1,
2224                 .suite = {
2225 -                       .hash = {
2226 -                               .vecs = crc32c_tv_template,
2227 -                               .count = CRC32C_TEST_VECTORS
2228 -                       }
2229 +                       .hash = __VECS(crc32c_tv_template)
2230                 }
2231         }, {
2232                 .alg = "crct10dif",
2233                 .test = alg_test_hash,
2234                 .fips_allowed = 1,
2235                 .suite = {
2236 -                       .hash = {
2237 -                               .vecs = crct10dif_tv_template,
2238 -                               .count = CRCT10DIF_TEST_VECTORS
2239 -                       }
2240 +                       .hash = __VECS(crct10dif_tv_template)
2241                 }
2242         }, {
2243 -               .alg = "cryptd(__driver-cbc-aes-aesni)",
2244 -               .test = alg_test_null,
2245 -               .fips_allowed = 1,
2246 -       }, {
2247 -               .alg = "cryptd(__driver-cbc-camellia-aesni)",
2248 -               .test = alg_test_null,
2249 -       }, {
2250 -               .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2251 -               .test = alg_test_null,
2252 -       }, {
2253 -               .alg = "cryptd(__driver-cbc-serpent-avx2)",
2254 -               .test = alg_test_null,
2255 -       }, {
2256 -               .alg = "cryptd(__driver-ecb-aes-aesni)",
2257 -               .test = alg_test_null,
2258 -               .fips_allowed = 1,
2259 -       }, {
2260 -               .alg = "cryptd(__driver-ecb-camellia-aesni)",
2261 -               .test = alg_test_null,
2262 -       }, {
2263 -               .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2264 -               .test = alg_test_null,
2265 -       }, {
2266 -               .alg = "cryptd(__driver-ecb-cast5-avx)",
2267 -               .test = alg_test_null,
2268 -       }, {
2269 -               .alg = "cryptd(__driver-ecb-cast6-avx)",
2270 -               .test = alg_test_null,
2271 -       }, {
2272 -               .alg = "cryptd(__driver-ecb-serpent-avx)",
2273 -               .test = alg_test_null,
2274 -       }, {
2275 -               .alg = "cryptd(__driver-ecb-serpent-avx2)",
2276 -               .test = alg_test_null,
2277 -       }, {
2278 -               .alg = "cryptd(__driver-ecb-serpent-sse2)",
2279 -               .test = alg_test_null,
2280 -       }, {
2281 -               .alg = "cryptd(__driver-ecb-twofish-avx)",
2282 -               .test = alg_test_null,
2283 -       }, {
2284 -               .alg = "cryptd(__driver-gcm-aes-aesni)",
2285 -               .test = alg_test_null,
2286 -               .fips_allowed = 1,
2287 -       }, {
2288 -               .alg = "cryptd(__ghash-pclmulqdqni)",
2289 -               .test = alg_test_null,
2290 -               .fips_allowed = 1,
2291 -       }, {
2292                 .alg = "ctr(aes)",
2293                 .test = alg_test_skcipher,
2294                 .fips_allowed = 1,
2295                 .suite = {
2296                         .cipher = {
2297 -                               .enc = {
2298 -                                       .vecs = aes_ctr_enc_tv_template,
2299 -                                       .count = AES_CTR_ENC_TEST_VECTORS
2300 -                               },
2301 -                               .dec = {
2302 -                                       .vecs = aes_ctr_dec_tv_template,
2303 -                                       .count = AES_CTR_DEC_TEST_VECTORS
2304 -                               }
2305 +                               .enc = __VECS(aes_ctr_enc_tv_template),
2306 +                               .dec = __VECS(aes_ctr_dec_tv_template)
2307                         }
2308                 }
2309         }, {
2310 @@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
2311                 .test = alg_test_skcipher,
2312                 .suite = {
2313                         .cipher = {
2314 -                               .enc = {
2315 -                                       .vecs = bf_ctr_enc_tv_template,
2316 -                                       .count = BF_CTR_ENC_TEST_VECTORS
2317 -                               },
2318 -                               .dec = {
2319 -                                       .vecs = bf_ctr_dec_tv_template,
2320 -                                       .count = BF_CTR_DEC_TEST_VECTORS
2321 -                               }
2322 +                               .enc = __VECS(bf_ctr_enc_tv_template),
2323 +                               .dec = __VECS(bf_ctr_dec_tv_template)
2324                         }
2325                 }
2326         }, {
2327 @@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
2328                 .test = alg_test_skcipher,
2329                 .suite = {
2330                         .cipher = {
2331 -                               .enc = {
2332 -                                       .vecs = camellia_ctr_enc_tv_template,
2333 -                                       .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2334 -                               },
2335 -                               .dec = {
2336 -                                       .vecs = camellia_ctr_dec_tv_template,
2337 -                                       .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2338 -                               }
2339 +                               .enc = __VECS(camellia_ctr_enc_tv_template),
2340 +                               .dec = __VECS(camellia_ctr_dec_tv_template)
2341                         }
2342                 }
2343         }, {
2344 @@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
2345                 .test = alg_test_skcipher,
2346                 .suite = {
2347                         .cipher = {
2348 -                               .enc = {
2349 -                                       .vecs = cast5_ctr_enc_tv_template,
2350 -                                       .count = CAST5_CTR_ENC_TEST_VECTORS
2351 -                               },
2352 -                               .dec = {
2353 -                                       .vecs = cast5_ctr_dec_tv_template,
2354 -                                       .count = CAST5_CTR_DEC_TEST_VECTORS
2355 -                               }
2356 +                               .enc = __VECS(cast5_ctr_enc_tv_template),
2357 +                               .dec = __VECS(cast5_ctr_dec_tv_template)
2358                         }
2359                 }
2360         }, {
2361 @@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
2362                 .test = alg_test_skcipher,
2363                 .suite = {
2364                         .cipher = {
2365 -                               .enc = {
2366 -                                       .vecs = cast6_ctr_enc_tv_template,
2367 -                                       .count = CAST6_CTR_ENC_TEST_VECTORS
2368 -                               },
2369 -                               .dec = {
2370 -                                       .vecs = cast6_ctr_dec_tv_template,
2371 -                                       .count = CAST6_CTR_DEC_TEST_VECTORS
2372 -                               }
2373 +                               .enc = __VECS(cast6_ctr_enc_tv_template),
2374 +                               .dec = __VECS(cast6_ctr_dec_tv_template)
2375                         }
2376                 }
2377         }, {
2378 @@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
2379                 .test = alg_test_skcipher,
2380                 .suite = {
2381                         .cipher = {
2382 -                               .enc = {
2383 -                                       .vecs = des_ctr_enc_tv_template,
2384 -                                       .count = DES_CTR_ENC_TEST_VECTORS
2385 -                               },
2386 -                               .dec = {
2387 -                                       .vecs = des_ctr_dec_tv_template,
2388 -                                       .count = DES_CTR_DEC_TEST_VECTORS
2389 -                               }
2390 +                               .enc = __VECS(des_ctr_enc_tv_template),
2391 +                               .dec = __VECS(des_ctr_dec_tv_template)
2392                         }
2393                 }
2394         }, {
2395                 .alg = "ctr(des3_ede)",
2396                 .test = alg_test_skcipher,
2397 +               .fips_allowed = 1,
2398                 .suite = {
2399                         .cipher = {
2400 -                               .enc = {
2401 -                                       .vecs = des3_ede_ctr_enc_tv_template,
2402 -                                       .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2403 -                               },
2404 -                               .dec = {
2405 -                                       .vecs = des3_ede_ctr_dec_tv_template,
2406 -                                       .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2407 -                               }
2408 +                               .enc = __VECS(des3_ede_ctr_enc_tv_template),
2409 +                               .dec = __VECS(des3_ede_ctr_dec_tv_template)
2410                         }
2411                 }
2412         }, {
2413 @@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
2414                 .test = alg_test_skcipher,
2415                 .suite = {
2416                         .cipher = {
2417 -                               .enc = {
2418 -                                       .vecs = serpent_ctr_enc_tv_template,
2419 -                                       .count = SERPENT_CTR_ENC_TEST_VECTORS
2420 -                               },
2421 -                               .dec = {
2422 -                                       .vecs = serpent_ctr_dec_tv_template,
2423 -                                       .count = SERPENT_CTR_DEC_TEST_VECTORS
2424 -                               }
2425 +                               .enc = __VECS(serpent_ctr_enc_tv_template),
2426 +                               .dec = __VECS(serpent_ctr_dec_tv_template)
2427                         }
2428                 }
2429         }, {
2430 @@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
2431                 .test = alg_test_skcipher,
2432                 .suite = {
2433                         .cipher = {
2434 -                               .enc = {
2435 -                                       .vecs = tf_ctr_enc_tv_template,
2436 -                                       .count = TF_CTR_ENC_TEST_VECTORS
2437 -                               },
2438 -                               .dec = {
2439 -                                       .vecs = tf_ctr_dec_tv_template,
2440 -                                       .count = TF_CTR_DEC_TEST_VECTORS
2441 -                               }
2442 +                               .enc = __VECS(tf_ctr_enc_tv_template),
2443 +                               .dec = __VECS(tf_ctr_dec_tv_template)
2444                         }
2445                 }
2446         }, {
2447 @@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
2448                 .test = alg_test_skcipher,
2449                 .suite = {
2450                         .cipher = {
2451 -                               .enc = {
2452 -                                       .vecs = cts_mode_enc_tv_template,
2453 -                                       .count = CTS_MODE_ENC_TEST_VECTORS
2454 -                               },
2455 -                               .dec = {
2456 -                                       .vecs = cts_mode_dec_tv_template,
2457 -                                       .count = CTS_MODE_DEC_TEST_VECTORS
2458 -                               }
2459 +                               .enc = __VECS(cts_mode_enc_tv_template),
2460 +                               .dec = __VECS(cts_mode_dec_tv_template)
2461                         }
2462                 }
2463         }, {
2464 @@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
2465                 .fips_allowed = 1,
2466                 .suite = {
2467                         .comp = {
2468 -                               .comp = {
2469 -                                       .vecs = deflate_comp_tv_template,
2470 -                                       .count = DEFLATE_COMP_TEST_VECTORS
2471 -                               },
2472 -                               .decomp = {
2473 -                                       .vecs = deflate_decomp_tv_template,
2474 -                                       .count = DEFLATE_DECOMP_TEST_VECTORS
2475 -                               }
2476 +                               .comp = __VECS(deflate_comp_tv_template),
2477 +                               .decomp = __VECS(deflate_decomp_tv_template)
2478                         }
2479                 }
2480         }, {
2481 @@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
2482                 .test = alg_test_kpp,
2483                 .fips_allowed = 1,
2484                 .suite = {
2485 -                       .kpp = {
2486 -                               .vecs = dh_tv_template,
2487 -                               .count = DH_TEST_VECTORS
2488 -                       }
2489 +                       .kpp = __VECS(dh_tv_template)
2490                 }
2491         }, {
2492                 .alg = "digest_null",
2493 @@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
2494                 .test = alg_test_drbg,
2495                 .fips_allowed = 1,
2496                 .suite = {
2497 -                       .drbg = {
2498 -                               .vecs = drbg_nopr_ctr_aes128_tv_template,
2499 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2500 -                       }
2501 +                       .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2502                 }
2503         }, {
2504                 .alg = "drbg_nopr_ctr_aes192",
2505                 .test = alg_test_drbg,
2506                 .fips_allowed = 1,
2507                 .suite = {
2508 -                       .drbg = {
2509 -                               .vecs = drbg_nopr_ctr_aes192_tv_template,
2510 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2511 -                       }
2512 +                       .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2513                 }
2514         }, {
2515                 .alg = "drbg_nopr_ctr_aes256",
2516                 .test = alg_test_drbg,
2517                 .fips_allowed = 1,
2518                 .suite = {
2519 -                       .drbg = {
2520 -                               .vecs = drbg_nopr_ctr_aes256_tv_template,
2521 -                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2522 -                       }
2523 +                       .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2524                 }
2525         }, {
2526                 /*
2527 @@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
2528                 .test = alg_test_drbg,
2529                 .fips_allowed = 1,
2530                 .suite = {
2531 -                       .drbg = {
2532 -                               .vecs = drbg_nopr_hmac_sha256_tv_template,
2533 -                               .count =
2534 -                               ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2535 -                       }
2536 +                       .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2537                 }
2538         }, {
2539                 /* covered by drbg_nopr_hmac_sha256 test */
2540 @@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
2541                 .test = alg_test_drbg,
2542                 .fips_allowed = 1,
2543                 .suite = {
2544 -                       .drbg = {
2545 -                               .vecs = drbg_nopr_sha256_tv_template,
2546 -                               .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2547 -                       }
2548 +                       .drbg = __VECS(drbg_nopr_sha256_tv_template)
2549                 }
2550         }, {
2551                 /* covered by drbg_nopr_sha256 test */
2552 @@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
2553                 .test = alg_test_drbg,
2554                 .fips_allowed = 1,
2555                 .suite = {
2556 -                       .drbg = {
2557 -                               .vecs = drbg_pr_ctr_aes128_tv_template,
2558 -                               .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2559 -                       }
2560 +                       .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2561                 }
2562         }, {
2563                 /* covered by drbg_pr_ctr_aes128 test */
2564 @@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
2565                 .test = alg_test_drbg,
2566                 .fips_allowed = 1,
2567                 .suite = {
2568 -                       .drbg = {
2569 -                               .vecs = drbg_pr_hmac_sha256_tv_template,
2570 -                               .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2571 -                       }
2572 +                       .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2573                 }
2574         }, {
2575                 /* covered by drbg_pr_hmac_sha256 test */
2576 @@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
2577                 .test = alg_test_drbg,
2578                 .fips_allowed = 1,
2579                 .suite = {
2580 -                       .drbg = {
2581 -                               .vecs = drbg_pr_sha256_tv_template,
2582 -                               .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2583 -                       }
2584 +                       .drbg = __VECS(drbg_pr_sha256_tv_template)
2585                 }
2586         }, {
2587                 /* covered by drbg_pr_sha256 test */
2588 @@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
2589                 .fips_allowed = 1,
2590                 .test = alg_test_null,
2591         }, {
2592 -               .alg = "ecb(__aes-aesni)",
2593 -               .test = alg_test_null,
2594 -               .fips_allowed = 1,
2595 -       }, {
2596                 .alg = "ecb(aes)",
2597                 .test = alg_test_skcipher,
2598                 .fips_allowed = 1,
2599                 .suite = {
2600                         .cipher = {
2601 -                               .enc = {
2602 -                                       .vecs = aes_enc_tv_template,
2603 -                                       .count = AES_ENC_TEST_VECTORS
2604 -                               },
2605 -                               .dec = {
2606 -                                       .vecs = aes_dec_tv_template,
2607 -                                       .count = AES_DEC_TEST_VECTORS
2608 -                               }
2609 +                               .enc = __VECS(aes_enc_tv_template),
2610 +                               .dec = __VECS(aes_dec_tv_template)
2611                         }
2612                 }
2613         }, {
2614 @@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
2615                 .test = alg_test_skcipher,
2616                 .suite = {
2617                         .cipher = {
2618 -                               .enc = {
2619 -                                       .vecs = anubis_enc_tv_template,
2620 -                                       .count = ANUBIS_ENC_TEST_VECTORS
2621 -                               },
2622 -                               .dec = {
2623 -                                       .vecs = anubis_dec_tv_template,
2624 -                                       .count = ANUBIS_DEC_TEST_VECTORS
2625 -                               }
2626 +                               .enc = __VECS(anubis_enc_tv_template),
2627 +                               .dec = __VECS(anubis_dec_tv_template)
2628                         }
2629                 }
2630         }, {
2631 @@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
2632                 .test = alg_test_skcipher,
2633                 .suite = {
2634                         .cipher = {
2635 -                               .enc = {
2636 -                                       .vecs = arc4_enc_tv_template,
2637 -                                       .count = ARC4_ENC_TEST_VECTORS
2638 -                               },
2639 -                               .dec = {
2640 -                                       .vecs = arc4_dec_tv_template,
2641 -                                       .count = ARC4_DEC_TEST_VECTORS
2642 -                               }
2643 +                               .enc = __VECS(arc4_enc_tv_template),
2644 +                               .dec = __VECS(arc4_dec_tv_template)
2645                         }
2646                 }
2647         }, {
2648 @@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
2649                 .test = alg_test_skcipher,
2650                 .suite = {
2651                         .cipher = {
2652 -                               .enc = {
2653 -                                       .vecs = bf_enc_tv_template,
2654 -                                       .count = BF_ENC_TEST_VECTORS
2655 -                               },
2656 -                               .dec = {
2657 -                                       .vecs = bf_dec_tv_template,
2658 -                                       .count = BF_DEC_TEST_VECTORS
2659 -                               }
2660 +                               .enc = __VECS(bf_enc_tv_template),
2661 +                               .dec = __VECS(bf_dec_tv_template)
2662                         }
2663                 }
2664         }, {
2665 @@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
2666                 .test = alg_test_skcipher,
2667                 .suite = {
2668                         .cipher = {
2669 -                               .enc = {
2670 -                                       .vecs = camellia_enc_tv_template,
2671 -                                       .count = CAMELLIA_ENC_TEST_VECTORS
2672 -                               },
2673 -                               .dec = {
2674 -                                       .vecs = camellia_dec_tv_template,
2675 -                                       .count = CAMELLIA_DEC_TEST_VECTORS
2676 -                               }
2677 +                               .enc = __VECS(camellia_enc_tv_template),
2678 +                               .dec = __VECS(camellia_dec_tv_template)
2679                         }
2680                 }
2681         }, {
2682 @@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
2683                 .test = alg_test_skcipher,
2684                 .suite = {
2685                         .cipher = {
2686 -                               .enc = {
2687 -                                       .vecs = cast5_enc_tv_template,
2688 -                                       .count = CAST5_ENC_TEST_VECTORS
2689 -                               },
2690 -                               .dec = {
2691 -                                       .vecs = cast5_dec_tv_template,
2692 -                                       .count = CAST5_DEC_TEST_VECTORS
2693 -                               }
2694 +                               .enc = __VECS(cast5_enc_tv_template),
2695 +                               .dec = __VECS(cast5_dec_tv_template)
2696                         }
2697                 }
2698         }, {
2699 @@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
2700                 .test = alg_test_skcipher,
2701                 .suite = {
2702                         .cipher = {
2703 -                               .enc = {
2704 -                                       .vecs = cast6_enc_tv_template,
2705 -                                       .count = CAST6_ENC_TEST_VECTORS
2706 -                               },
2707 -                               .dec = {
2708 -                                       .vecs = cast6_dec_tv_template,
2709 -                                       .count = CAST6_DEC_TEST_VECTORS
2710 -                               }
2711 +                               .enc = __VECS(cast6_enc_tv_template),
2712 +                               .dec = __VECS(cast6_dec_tv_template)
2713                         }
2714                 }
2715         }, {
2716 @@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
2717                 .test = alg_test_skcipher,
2718                 .suite = {
2719                         .cipher = {
2720 -                               .enc = {
2721 -                                       .vecs = des_enc_tv_template,
2722 -                                       .count = DES_ENC_TEST_VECTORS
2723 -                               },
2724 -                               .dec = {
2725 -                                       .vecs = des_dec_tv_template,
2726 -                                       .count = DES_DEC_TEST_VECTORS
2727 -                               }
2728 +                               .enc = __VECS(des_enc_tv_template),
2729 +                               .dec = __VECS(des_dec_tv_template)
2730                         }
2731                 }
2732         }, {
2733 @@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
2734                 .fips_allowed = 1,
2735                 .suite = {
2736                         .cipher = {
2737 -                               .enc = {
2738 -                                       .vecs = des3_ede_enc_tv_template,
2739 -                                       .count = DES3_EDE_ENC_TEST_VECTORS
2740 -                               },
2741 -                               .dec = {
2742 -                                       .vecs = des3_ede_dec_tv_template,
2743 -                                       .count = DES3_EDE_DEC_TEST_VECTORS
2744 -                               }
2745 +                               .enc = __VECS(des3_ede_enc_tv_template),
2746 +                               .dec = __VECS(des3_ede_dec_tv_template)
2747                         }
2748                 }
2749         }, {
2750 @@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
2751                 .test = alg_test_skcipher,
2752                 .suite = {
2753                         .cipher = {
2754 -                               .enc = {
2755 -                                       .vecs = khazad_enc_tv_template,
2756 -                                       .count = KHAZAD_ENC_TEST_VECTORS
2757 -                               },
2758 -                               .dec = {
2759 -                                       .vecs = khazad_dec_tv_template,
2760 -                                       .count = KHAZAD_DEC_TEST_VECTORS
2761 -                               }
2762 +                               .enc = __VECS(khazad_enc_tv_template),
2763 +                               .dec = __VECS(khazad_dec_tv_template)
2764                         }
2765                 }
2766         }, {
2767 @@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
2768                 .test = alg_test_skcipher,
2769                 .suite = {
2770                         .cipher = {
2771 -                               .enc = {
2772 -                                       .vecs = seed_enc_tv_template,
2773 -                                       .count = SEED_ENC_TEST_VECTORS
2774 -                               },
2775 -                               .dec = {
2776 -                                       .vecs = seed_dec_tv_template,
2777 -                                       .count = SEED_DEC_TEST_VECTORS
2778 -                               }
2779 +                               .enc = __VECS(seed_enc_tv_template),
2780 +                               .dec = __VECS(seed_dec_tv_template)
2781                         }
2782                 }
2783         }, {
2784 @@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
2785                 .test = alg_test_skcipher,
2786                 .suite = {
2787                         .cipher = {
2788 -                               .enc = {
2789 -                                       .vecs = serpent_enc_tv_template,
2790 -                                       .count = SERPENT_ENC_TEST_VECTORS
2791 -                               },
2792 -                               .dec = {
2793 -                                       .vecs = serpent_dec_tv_template,
2794 -                                       .count = SERPENT_DEC_TEST_VECTORS
2795 -                               }
2796 +                               .enc = __VECS(serpent_enc_tv_template),
2797 +                               .dec = __VECS(serpent_dec_tv_template)
2798                         }
2799                 }
2800         }, {
2801 @@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
2802                 .test = alg_test_skcipher,
2803                 .suite = {
2804                         .cipher = {
2805 -                               .enc = {
2806 -                                       .vecs = tea_enc_tv_template,
2807 -                                       .count = TEA_ENC_TEST_VECTORS
2808 -                               },
2809 -                               .dec = {
2810 -                                       .vecs = tea_dec_tv_template,
2811 -                                       .count = TEA_DEC_TEST_VECTORS
2812 -                               }
2813 +                               .enc = __VECS(tea_enc_tv_template),
2814 +                               .dec = __VECS(tea_dec_tv_template)
2815                         }
2816                 }
2817         }, {
2818 @@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
2819                 .test = alg_test_skcipher,
2820                 .suite = {
2821                         .cipher = {
2822 -                               .enc = {
2823 -                                       .vecs = tnepres_enc_tv_template,
2824 -                                       .count = TNEPRES_ENC_TEST_VECTORS
2825 -                               },
2826 -                               .dec = {
2827 -                                       .vecs = tnepres_dec_tv_template,
2828 -                                       .count = TNEPRES_DEC_TEST_VECTORS
2829 -                               }
2830 +                               .enc = __VECS(tnepres_enc_tv_template),
2831 +                               .dec = __VECS(tnepres_dec_tv_template)
2832                         }
2833                 }
2834         }, {
2835 @@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
2836                 .test = alg_test_skcipher,
2837                 .suite = {
2838                         .cipher = {
2839 -                               .enc = {
2840 -                                       .vecs = tf_enc_tv_template,
2841 -                                       .count = TF_ENC_TEST_VECTORS
2842 -                               },
2843 -                               .dec = {
2844 -                                       .vecs = tf_dec_tv_template,
2845 -                                       .count = TF_DEC_TEST_VECTORS
2846 -                               }
2847 +                               .enc = __VECS(tf_enc_tv_template),
2848 +                               .dec = __VECS(tf_dec_tv_template)
2849                         }
2850                 }
2851         }, {
2852 @@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
2853                 .test = alg_test_skcipher,
2854                 .suite = {
2855                         .cipher = {
2856 -                               .enc = {
2857 -                                       .vecs = xeta_enc_tv_template,
2858 -                                       .count = XETA_ENC_TEST_VECTORS
2859 -                               },
2860 -                               .dec = {
2861 -                                       .vecs = xeta_dec_tv_template,
2862 -                                       .count = XETA_DEC_TEST_VECTORS
2863 -                               }
2864 +                               .enc = __VECS(xeta_enc_tv_template),
2865 +                               .dec = __VECS(xeta_dec_tv_template)
2866                         }
2867                 }
2868         }, {
2869 @@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
2870                 .test = alg_test_skcipher,
2871                 .suite = {
2872                         .cipher = {
2873 -                               .enc = {
2874 -                                       .vecs = xtea_enc_tv_template,
2875 -                                       .count = XTEA_ENC_TEST_VECTORS
2876 -                               },
2877 -                               .dec = {
2878 -                                       .vecs = xtea_dec_tv_template,
2879 -                                       .count = XTEA_DEC_TEST_VECTORS
2880 -                               }
2881 +                               .enc = __VECS(xtea_enc_tv_template),
2882 +                               .dec = __VECS(xtea_dec_tv_template)
2883                         }
2884                 }
2885         }, {
2886 @@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
2887                 .test = alg_test_kpp,
2888                 .fips_allowed = 1,
2889                 .suite = {
2890 -                       .kpp = {
2891 -                               .vecs = ecdh_tv_template,
2892 -                               .count = ECDH_TEST_VECTORS
2893 -                       }
2894 +                       .kpp = __VECS(ecdh_tv_template)
2895                 }
2896         }, {
2897                 .alg = "gcm(aes)",
2898 @@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
2899                 .fips_allowed = 1,
2900                 .suite = {
2901                         .aead = {
2902 -                               .enc = {
2903 -                                       .vecs = aes_gcm_enc_tv_template,
2904 -                                       .count = AES_GCM_ENC_TEST_VECTORS
2905 -                               },
2906 -                               .dec = {
2907 -                                       .vecs = aes_gcm_dec_tv_template,
2908 -                                       .count = AES_GCM_DEC_TEST_VECTORS
2909 -                               }
2910 +                               .enc = __VECS(aes_gcm_enc_tv_template),
2911 +                               .dec = __VECS(aes_gcm_dec_tv_template)
2912                         }
2913                 }
2914         }, {
2915 @@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
2916                 .test = alg_test_hash,
2917                 .fips_allowed = 1,
2918                 .suite = {
2919 -                       .hash = {
2920 -                               .vecs = ghash_tv_template,
2921 -                               .count = GHASH_TEST_VECTORS
2922 -                       }
2923 +                       .hash = __VECS(ghash_tv_template)
2924                 }
2925         }, {
2926                 .alg = "hmac(crc32)",
2927                 .test = alg_test_hash,
2928                 .suite = {
2929 -                       .hash = {
2930 -                               .vecs = bfin_crc_tv_template,
2931 -                               .count = BFIN_CRC_TEST_VECTORS
2932 -                       }
2933 +                       .hash = __VECS(bfin_crc_tv_template)
2934                 }
2935         }, {
2936                 .alg = "hmac(md5)",
2937                 .test = alg_test_hash,
2938                 .suite = {
2939 -                       .hash = {
2940 -                               .vecs = hmac_md5_tv_template,
2941 -                               .count = HMAC_MD5_TEST_VECTORS
2942 -                       }
2943 +                       .hash = __VECS(hmac_md5_tv_template)
2944                 }
2945         }, {
2946                 .alg = "hmac(rmd128)",
2947                 .test = alg_test_hash,
2948                 .suite = {
2949 -                       .hash = {
2950 -                               .vecs = hmac_rmd128_tv_template,
2951 -                               .count = HMAC_RMD128_TEST_VECTORS
2952 -                       }
2953 +                       .hash = __VECS(hmac_rmd128_tv_template)
2954                 }
2955         }, {
2956                 .alg = "hmac(rmd160)",
2957                 .test = alg_test_hash,
2958                 .suite = {
2959 -                       .hash = {
2960 -                               .vecs = hmac_rmd160_tv_template,
2961 -                               .count = HMAC_RMD160_TEST_VECTORS
2962 -                       }
2963 +                       .hash = __VECS(hmac_rmd160_tv_template)
2964                 }
2965         }, {
2966                 .alg = "hmac(sha1)",
2967                 .test = alg_test_hash,
2968                 .fips_allowed = 1,
2969                 .suite = {
2970 -                       .hash = {
2971 -                               .vecs = hmac_sha1_tv_template,
2972 -                               .count = HMAC_SHA1_TEST_VECTORS
2973 -                       }
2974 +                       .hash = __VECS(hmac_sha1_tv_template)
2975                 }
2976         }, {
2977                 .alg = "hmac(sha224)",
2978                 .test = alg_test_hash,
2979                 .fips_allowed = 1,
2980                 .suite = {
2981 -                       .hash = {
2982 -                               .vecs = hmac_sha224_tv_template,
2983 -                               .count = HMAC_SHA224_TEST_VECTORS
2984 -                       }
2985 +                       .hash = __VECS(hmac_sha224_tv_template)
2986                 }
2987         }, {
2988                 .alg = "hmac(sha256)",
2989                 .test = alg_test_hash,
2990                 .fips_allowed = 1,
2991                 .suite = {
2992 -                       .hash = {
2993 -                               .vecs = hmac_sha256_tv_template,
2994 -                               .count = HMAC_SHA256_TEST_VECTORS
2995 -                       }
2996 +                       .hash = __VECS(hmac_sha256_tv_template)
2997                 }
2998         }, {
2999                 .alg = "hmac(sha3-224)",
3000                 .test = alg_test_hash,
3001                 .fips_allowed = 1,
3002                 .suite = {
3003 -                       .hash = {
3004 -                               .vecs = hmac_sha3_224_tv_template,
3005 -                               .count = HMAC_SHA3_224_TEST_VECTORS
3006 -                       }
3007 +                       .hash = __VECS(hmac_sha3_224_tv_template)
3008                 }
3009         }, {
3010                 .alg = "hmac(sha3-256)",
3011                 .test = alg_test_hash,
3012                 .fips_allowed = 1,
3013                 .suite = {
3014 -                       .hash = {
3015 -                               .vecs = hmac_sha3_256_tv_template,
3016 -                               .count = HMAC_SHA3_256_TEST_VECTORS
3017 -                       }
3018 +                       .hash = __VECS(hmac_sha3_256_tv_template)
3019                 }
3020         }, {
3021                 .alg = "hmac(sha3-384)",
3022                 .test = alg_test_hash,
3023                 .fips_allowed = 1,
3024                 .suite = {
3025 -                       .hash = {
3026 -                               .vecs = hmac_sha3_384_tv_template,
3027 -                               .count = HMAC_SHA3_384_TEST_VECTORS
3028 -                       }
3029 +                       .hash = __VECS(hmac_sha3_384_tv_template)
3030                 }
3031         }, {
3032                 .alg = "hmac(sha3-512)",
3033                 .test = alg_test_hash,
3034                 .fips_allowed = 1,
3035                 .suite = {
3036 -                       .hash = {
3037 -                               .vecs = hmac_sha3_512_tv_template,
3038 -                               .count = HMAC_SHA3_512_TEST_VECTORS
3039 -                       }
3040 +                       .hash = __VECS(hmac_sha3_512_tv_template)
3041                 }
3042         }, {
3043                 .alg = "hmac(sha384)",
3044                 .test = alg_test_hash,
3045                 .fips_allowed = 1,
3046                 .suite = {
3047 -                       .hash = {
3048 -                               .vecs = hmac_sha384_tv_template,
3049 -                               .count = HMAC_SHA384_TEST_VECTORS
3050 -                       }
3051 +                       .hash = __VECS(hmac_sha384_tv_template)
3052                 }
3053         }, {
3054                 .alg = "hmac(sha512)",
3055                 .test = alg_test_hash,
3056                 .fips_allowed = 1,
3057                 .suite = {
3058 -                       .hash = {
3059 -                               .vecs = hmac_sha512_tv_template,
3060 -                               .count = HMAC_SHA512_TEST_VECTORS
3061 -                       }
3062 +                       .hash = __VECS(hmac_sha512_tv_template)
3063                 }
3064         }, {
3065                 .alg = "jitterentropy_rng",
3066 @@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
3067                 .fips_allowed = 1,
3068                 .suite = {
3069                         .cipher = {
3070 -                               .enc = {
3071 -                                       .vecs = aes_kw_enc_tv_template,
3072 -                                       .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3073 -                               },
3074 -                               .dec = {
3075 -                                       .vecs = aes_kw_dec_tv_template,
3076 -                                       .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3077 -                               }
3078 +                               .enc = __VECS(aes_kw_enc_tv_template),
3079 +                               .dec = __VECS(aes_kw_dec_tv_template)
3080                         }
3081                 }
3082         }, {
3083 @@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
3084                 .test = alg_test_skcipher,
3085                 .suite = {
3086                         .cipher = {
3087 -                               .enc = {
3088 -                                       .vecs = aes_lrw_enc_tv_template,
3089 -                                       .count = AES_LRW_ENC_TEST_VECTORS
3090 -                               },
3091 -                               .dec = {
3092 -                                       .vecs = aes_lrw_dec_tv_template,
3093 -                                       .count = AES_LRW_DEC_TEST_VECTORS
3094 -                               }
3095 +                               .enc = __VECS(aes_lrw_enc_tv_template),
3096 +                               .dec = __VECS(aes_lrw_dec_tv_template)
3097                         }
3098                 }
3099         }, {
3100 @@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
3101                 .test = alg_test_skcipher,
3102                 .suite = {
3103                         .cipher = {
3104 -                               .enc = {
3105 -                                       .vecs = camellia_lrw_enc_tv_template,
3106 -                                       .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3107 -                               },
3108 -                               .dec = {
3109 -                                       .vecs = camellia_lrw_dec_tv_template,
3110 -                                       .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3111 -                               }
3112 +                               .enc = __VECS(camellia_lrw_enc_tv_template),
3113 +                               .dec = __VECS(camellia_lrw_dec_tv_template)
3114                         }
3115                 }
3116         }, {
3117 @@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
3118                 .test = alg_test_skcipher,
3119                 .suite = {
3120                         .cipher = {
3121 -                               .enc = {
3122 -                                       .vecs = cast6_lrw_enc_tv_template,
3123 -                                       .count = CAST6_LRW_ENC_TEST_VECTORS
3124 -                               },
3125 -                               .dec = {
3126 -                                       .vecs = cast6_lrw_dec_tv_template,
3127 -                                       .count = CAST6_LRW_DEC_TEST_VECTORS
3128 -                               }
3129 +                               .enc = __VECS(cast6_lrw_enc_tv_template),
3130 +                               .dec = __VECS(cast6_lrw_dec_tv_template)
3131                         }
3132                 }
3133         }, {
3134 @@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
3135                 .test = alg_test_skcipher,
3136                 .suite = {
3137                         .cipher = {
3138 -                               .enc = {
3139 -                                       .vecs = serpent_lrw_enc_tv_template,
3140 -                                       .count = SERPENT_LRW_ENC_TEST_VECTORS
3141 -                               },
3142 -                               .dec = {
3143 -                                       .vecs = serpent_lrw_dec_tv_template,
3144 -                                       .count = SERPENT_LRW_DEC_TEST_VECTORS
3145 -                               }
3146 +                               .enc = __VECS(serpent_lrw_enc_tv_template),
3147 +                               .dec = __VECS(serpent_lrw_dec_tv_template)
3148                         }
3149                 }
3150         }, {
3151 @@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
3152                 .test = alg_test_skcipher,
3153                 .suite = {
3154                         .cipher = {
3155 -                               .enc = {
3156 -                                       .vecs = tf_lrw_enc_tv_template,
3157 -                                       .count = TF_LRW_ENC_TEST_VECTORS
3158 -                               },
3159 -                               .dec = {
3160 -                                       .vecs = tf_lrw_dec_tv_template,
3161 -                                       .count = TF_LRW_DEC_TEST_VECTORS
3162 -                               }
3163 +                               .enc = __VECS(tf_lrw_enc_tv_template),
3164 +                               .dec = __VECS(tf_lrw_dec_tv_template)
3165                         }
3166                 }
3167         }, {
3168 @@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
3169                 .fips_allowed = 1,
3170                 .suite = {
3171                         .comp = {
3172 -                               .comp = {
3173 -                                       .vecs = lz4_comp_tv_template,
3174 -                                       .count = LZ4_COMP_TEST_VECTORS
3175 -                               },
3176 -                               .decomp = {
3177 -                                       .vecs = lz4_decomp_tv_template,
3178 -                                       .count = LZ4_DECOMP_TEST_VECTORS
3179 -                               }
3180 +                               .comp = __VECS(lz4_comp_tv_template),
3181 +                               .decomp = __VECS(lz4_decomp_tv_template)
3182                         }
3183                 }
3184         }, {
3185 @@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
3186                 .fips_allowed = 1,
3187                 .suite = {
3188                         .comp = {
3189 -                               .comp = {
3190 -                                       .vecs = lz4hc_comp_tv_template,
3191 -                                       .count = LZ4HC_COMP_TEST_VECTORS
3192 -                               },
3193 -                               .decomp = {
3194 -                                       .vecs = lz4hc_decomp_tv_template,
3195 -                                       .count = LZ4HC_DECOMP_TEST_VECTORS
3196 -                               }
3197 +                               .comp = __VECS(lz4hc_comp_tv_template),
3198 +                               .decomp = __VECS(lz4hc_decomp_tv_template)
3199                         }
3200                 }
3201         }, {
3202 @@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
3203                 .fips_allowed = 1,
3204                 .suite = {
3205                         .comp = {
3206 -                               .comp = {
3207 -                                       .vecs = lzo_comp_tv_template,
3208 -                                       .count = LZO_COMP_TEST_VECTORS
3209 -                               },
3210 -                               .decomp = {
3211 -                                       .vecs = lzo_decomp_tv_template,
3212 -                                       .count = LZO_DECOMP_TEST_VECTORS
3213 -                               }
3214 +                               .comp = __VECS(lzo_comp_tv_template),
3215 +                               .decomp = __VECS(lzo_decomp_tv_template)
3216                         }
3217                 }
3218         }, {
3219                 .alg = "md4",
3220                 .test = alg_test_hash,
3221                 .suite = {
3222 -                       .hash = {
3223 -                               .vecs = md4_tv_template,
3224 -                               .count = MD4_TEST_VECTORS
3225 -                       }
3226 +                       .hash = __VECS(md4_tv_template)
3227                 }
3228         }, {
3229                 .alg = "md5",
3230                 .test = alg_test_hash,
3231                 .suite = {
3232 -                       .hash = {
3233 -                               .vecs = md5_tv_template,
3234 -                               .count = MD5_TEST_VECTORS
3235 -                       }
3236 +                       .hash = __VECS(md5_tv_template)
3237                 }
3238         }, {
3239                 .alg = "michael_mic",
3240                 .test = alg_test_hash,
3241                 .suite = {
3242 -                       .hash = {
3243 -                               .vecs = michael_mic_tv_template,
3244 -                               .count = MICHAEL_MIC_TEST_VECTORS
3245 -                       }
3246 +                       .hash = __VECS(michael_mic_tv_template)
3247                 }
3248         }, {
3249                 .alg = "ofb(aes)",
3250 @@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
3251                 .fips_allowed = 1,
3252                 .suite = {
3253                         .cipher = {
3254 -                               .enc = {
3255 -                                       .vecs = aes_ofb_enc_tv_template,
3256 -                                       .count = AES_OFB_ENC_TEST_VECTORS
3257 -                               },
3258 -                               .dec = {
3259 -                                       .vecs = aes_ofb_dec_tv_template,
3260 -                                       .count = AES_OFB_DEC_TEST_VECTORS
3261 -                               }
3262 +                               .enc = __VECS(aes_ofb_enc_tv_template),
3263 +                               .dec = __VECS(aes_ofb_dec_tv_template)
3264                         }
3265                 }
3266         }, {
3267 @@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
3268                 .test = alg_test_skcipher,
3269                 .suite = {
3270                         .cipher = {
3271 -                               .enc = {
3272 -                                       .vecs = fcrypt_pcbc_enc_tv_template,
3273 -                                       .count = FCRYPT_ENC_TEST_VECTORS
3274 -                               },
3275 -                               .dec = {
3276 -                                       .vecs = fcrypt_pcbc_dec_tv_template,
3277 -                                       .count = FCRYPT_DEC_TEST_VECTORS
3278 -                               }
3279 +                               .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3280 +                               .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3281                         }
3282                 }
3283         }, {
3284                 .alg = "poly1305",
3285                 .test = alg_test_hash,
3286                 .suite = {
3287 -                       .hash = {
3288 -                               .vecs = poly1305_tv_template,
3289 -                               .count = POLY1305_TEST_VECTORS
3290 -                       }
3291 +                       .hash = __VECS(poly1305_tv_template)
3292                 }
3293         }, {
3294                 .alg = "rfc3686(ctr(aes))",
3295 @@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
3296                 .fips_allowed = 1,
3297                 .suite = {
3298                         .cipher = {
3299 -                               .enc = {
3300 -                                       .vecs = aes_ctr_rfc3686_enc_tv_template,
3301 -                                       .count = AES_CTR_3686_ENC_TEST_VECTORS
3302 -                               },
3303 -                               .dec = {
3304 -                                       .vecs = aes_ctr_rfc3686_dec_tv_template,
3305 -                                       .count = AES_CTR_3686_DEC_TEST_VECTORS
3306 -                               }
3307 +                               .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3308 +                               .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3309                         }
3310                 }
3311         }, {
3312 @@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
3313                 .fips_allowed = 1,
3314                 .suite = {
3315                         .aead = {
3316 -                               .enc = {
3317 -                                       .vecs = aes_gcm_rfc4106_enc_tv_template,
3318 -                                       .count = AES_GCM_4106_ENC_TEST_VECTORS
3319 -                               },
3320 -                               .dec = {
3321 -                                       .vecs = aes_gcm_rfc4106_dec_tv_template,
3322 -                                       .count = AES_GCM_4106_DEC_TEST_VECTORS
3323 -                               }
3324 +                               .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3325 +                               .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3326                         }
3327                 }
3328         }, {
3329 @@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
3330                 .fips_allowed = 1,
3331                 .suite = {
3332                         .aead = {
3333 -                               .enc = {
3334 -                                       .vecs = aes_ccm_rfc4309_enc_tv_template,
3335 -                                       .count = AES_CCM_4309_ENC_TEST_VECTORS
3336 -                               },
3337 -                               .dec = {
3338 -                                       .vecs = aes_ccm_rfc4309_dec_tv_template,
3339 -                                       .count = AES_CCM_4309_DEC_TEST_VECTORS
3340 -                               }
3341 +                               .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3342 +                               .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3343                         }
3344                 }
3345         }, {
3346 @@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
3347                 .test = alg_test_aead,
3348                 .suite = {
3349                         .aead = {
3350 -                               .enc = {
3351 -                                       .vecs = aes_gcm_rfc4543_enc_tv_template,
3352 -                                       .count = AES_GCM_4543_ENC_TEST_VECTORS
3353 -                               },
3354 -                               .dec = {
3355 -                                       .vecs = aes_gcm_rfc4543_dec_tv_template,
3356 -                                       .count = AES_GCM_4543_DEC_TEST_VECTORS
3357 -                               },
3358 +                               .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3359 +                               .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3360                         }
3361                 }
3362         }, {
3363 @@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
3364                 .test = alg_test_aead,
3365                 .suite = {
3366                         .aead = {
3367 -                               .enc = {
3368 -                                       .vecs = rfc7539_enc_tv_template,
3369 -                                       .count = RFC7539_ENC_TEST_VECTORS
3370 -                               },
3371 -                               .dec = {
3372 -                                       .vecs = rfc7539_dec_tv_template,
3373 -                                       .count = RFC7539_DEC_TEST_VECTORS
3374 -                               },
3375 +                               .enc = __VECS(rfc7539_enc_tv_template),
3376 +                               .dec = __VECS(rfc7539_dec_tv_template),
3377                         }
3378                 }
3379         }, {
3380 @@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
3381                 .test = alg_test_aead,
3382                 .suite = {
3383                         .aead = {
3384 -                               .enc = {
3385 -                                       .vecs = rfc7539esp_enc_tv_template,
3386 -                                       .count = RFC7539ESP_ENC_TEST_VECTORS
3387 -                               },
3388 -                               .dec = {
3389 -                                       .vecs = rfc7539esp_dec_tv_template,
3390 -                                       .count = RFC7539ESP_DEC_TEST_VECTORS
3391 -                               },
3392 +                               .enc = __VECS(rfc7539esp_enc_tv_template),
3393 +                               .dec = __VECS(rfc7539esp_dec_tv_template),
3394                         }
3395                 }
3396         }, {
3397                 .alg = "rmd128",
3398                 .test = alg_test_hash,
3399                 .suite = {
3400 -                       .hash = {
3401 -                               .vecs = rmd128_tv_template,
3402 -                               .count = RMD128_TEST_VECTORS
3403 -                       }
3404 +                       .hash = __VECS(rmd128_tv_template)
3405                 }
3406         }, {
3407                 .alg = "rmd160",
3408                 .test = alg_test_hash,
3409                 .suite = {
3410 -                       .hash = {
3411 -                               .vecs = rmd160_tv_template,
3412 -                               .count = RMD160_TEST_VECTORS
3413 -                       }
3414 +                       .hash = __VECS(rmd160_tv_template)
3415                 }
3416         }, {
3417                 .alg = "rmd256",
3418                 .test = alg_test_hash,
3419                 .suite = {
3420 -                       .hash = {
3421 -                               .vecs = rmd256_tv_template,
3422 -                               .count = RMD256_TEST_VECTORS
3423 -                       }
3424 +                       .hash = __VECS(rmd256_tv_template)
3425                 }
3426         }, {
3427                 .alg = "rmd320",
3428                 .test = alg_test_hash,
3429                 .suite = {
3430 -                       .hash = {
3431 -                               .vecs = rmd320_tv_template,
3432 -                               .count = RMD320_TEST_VECTORS
3433 -                       }
3434 +                       .hash = __VECS(rmd320_tv_template)
3435                 }
3436         }, {
3437                 .alg = "rsa",
3438                 .test = alg_test_akcipher,
3439                 .fips_allowed = 1,
3440                 .suite = {
3441 -                       .akcipher = {
3442 -                               .vecs = rsa_tv_template,
3443 -                               .count = RSA_TEST_VECTORS
3444 -                       }
3445 +                       .akcipher = __VECS(rsa_tv_template)
3446                 }
3447         }, {
3448                 .alg = "salsa20",
3449                 .test = alg_test_skcipher,
3450                 .suite = {
3451                         .cipher = {
3452 -                               .enc = {
3453 -                                       .vecs = salsa20_stream_enc_tv_template,
3454 -                                       .count = SALSA20_STREAM_ENC_TEST_VECTORS
3455 -                               }
3456 +                               .enc = __VECS(salsa20_stream_enc_tv_template)
3457                         }
3458                 }
3459         }, {
3460 @@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
3461                 .test = alg_test_hash,
3462                 .fips_allowed = 1,
3463                 .suite = {
3464 -                       .hash = {
3465 -                               .vecs = sha1_tv_template,
3466 -                               .count = SHA1_TEST_VECTORS
3467 -                       }
3468 +                       .hash = __VECS(sha1_tv_template)
3469                 }
3470         }, {
3471                 .alg = "sha224",
3472                 .test = alg_test_hash,
3473                 .fips_allowed = 1,
3474                 .suite = {
3475 -                       .hash = {
3476 -                               .vecs = sha224_tv_template,
3477 -                               .count = SHA224_TEST_VECTORS
3478 -                       }
3479 +                       .hash = __VECS(sha224_tv_template)
3480                 }
3481         }, {
3482                 .alg = "sha256",
3483                 .test = alg_test_hash,
3484                 .fips_allowed = 1,
3485                 .suite = {
3486 -                       .hash = {
3487 -                               .vecs = sha256_tv_template,
3488 -                               .count = SHA256_TEST_VECTORS
3489 -                       }
3490 +                       .hash = __VECS(sha256_tv_template)
3491                 }
3492         }, {
3493                 .alg = "sha3-224",
3494                 .test = alg_test_hash,
3495                 .fips_allowed = 1,
3496                 .suite = {
3497 -                       .hash = {
3498 -                               .vecs = sha3_224_tv_template,
3499 -                               .count = SHA3_224_TEST_VECTORS
3500 -                       }
3501 +                       .hash = __VECS(sha3_224_tv_template)
3502                 }
3503         }, {
3504                 .alg = "sha3-256",
3505                 .test = alg_test_hash,
3506                 .fips_allowed = 1,
3507                 .suite = {
3508 -                       .hash = {
3509 -                               .vecs = sha3_256_tv_template,
3510 -                               .count = SHA3_256_TEST_VECTORS
3511 -                       }
3512 +                       .hash = __VECS(sha3_256_tv_template)
3513                 }
3514         }, {
3515                 .alg = "sha3-384",
3516                 .test = alg_test_hash,
3517                 .fips_allowed = 1,
3518                 .suite = {
3519 -                       .hash = {
3520 -                               .vecs = sha3_384_tv_template,
3521 -                               .count = SHA3_384_TEST_VECTORS
3522 -                       }
3523 +                       .hash = __VECS(sha3_384_tv_template)
3524                 }
3525         }, {
3526                 .alg = "sha3-512",
3527                 .test = alg_test_hash,
3528                 .fips_allowed = 1,
3529                 .suite = {
3530 -                       .hash = {
3531 -                               .vecs = sha3_512_tv_template,
3532 -                               .count = SHA3_512_TEST_VECTORS
3533 -                       }
3534 +                       .hash = __VECS(sha3_512_tv_template)
3535                 }
3536         }, {
3537                 .alg = "sha384",
3538                 .test = alg_test_hash,
3539                 .fips_allowed = 1,
3540                 .suite = {
3541 -                       .hash = {
3542 -                               .vecs = sha384_tv_template,
3543 -                               .count = SHA384_TEST_VECTORS
3544 -                       }
3545 +                       .hash = __VECS(sha384_tv_template)
3546                 }
3547         }, {
3548                 .alg = "sha512",
3549                 .test = alg_test_hash,
3550                 .fips_allowed = 1,
3551                 .suite = {
3552 -                       .hash = {
3553 -                               .vecs = sha512_tv_template,
3554 -                               .count = SHA512_TEST_VECTORS
3555 -                       }
3556 +                       .hash = __VECS(sha512_tv_template)
3557                 }
3558         }, {
3559                 .alg = "tgr128",
3560                 .test = alg_test_hash,
3561                 .suite = {
3562 -                       .hash = {
3563 -                               .vecs = tgr128_tv_template,
3564 -                               .count = TGR128_TEST_VECTORS
3565 -                       }
3566 +                       .hash = __VECS(tgr128_tv_template)
3567                 }
3568         }, {
3569                 .alg = "tgr160",
3570                 .test = alg_test_hash,
3571                 .suite = {
3572 -                       .hash = {
3573 -                               .vecs = tgr160_tv_template,
3574 -                               .count = TGR160_TEST_VECTORS
3575 -                       }
3576 +                       .hash = __VECS(tgr160_tv_template)
3577                 }
3578         }, {
3579                 .alg = "tgr192",
3580                 .test = alg_test_hash,
3581                 .suite = {
3582 -                       .hash = {
3583 -                               .vecs = tgr192_tv_template,
3584 -                               .count = TGR192_TEST_VECTORS
3585 +                       .hash = __VECS(tgr192_tv_template)
3586 +               }
3587 +       }, {
3588 +               .alg = "tls10(hmac(sha1),cbc(aes))",
3589 +               .test = alg_test_tls,
3590 +               .suite = {
3591 +                       .tls = {
3592 +                               .enc = __VECS(tls_enc_tv_template),
3593 +                               .dec = __VECS(tls_dec_tv_template)
3594                         }
3595                 }
3596         }, {
3597                 .alg = "vmac(aes)",
3598                 .test = alg_test_hash,
3599                 .suite = {
3600 -                       .hash = {
3601 -                               .vecs = aes_vmac128_tv_template,
3602 -                               .count = VMAC_AES_TEST_VECTORS
3603 -                       }
3604 +                       .hash = __VECS(aes_vmac128_tv_template)
3605                 }
3606         }, {
3607                 .alg = "wp256",
3608                 .test = alg_test_hash,
3609                 .suite = {
3610 -                       .hash = {
3611 -                               .vecs = wp256_tv_template,
3612 -                               .count = WP256_TEST_VECTORS
3613 -                       }
3614 +                       .hash = __VECS(wp256_tv_template)
3615                 }
3616         }, {
3617                 .alg = "wp384",
3618                 .test = alg_test_hash,
3619                 .suite = {
3620 -                       .hash = {
3621 -                               .vecs = wp384_tv_template,
3622 -                               .count = WP384_TEST_VECTORS
3623 -                       }
3624 +                       .hash = __VECS(wp384_tv_template)
3625                 }
3626         }, {
3627                 .alg = "wp512",
3628                 .test = alg_test_hash,
3629                 .suite = {
3630 -                       .hash = {
3631 -                               .vecs = wp512_tv_template,
3632 -                               .count = WP512_TEST_VECTORS
3633 -                       }
3634 +                       .hash = __VECS(wp512_tv_template)
3635                 }
3636         }, {
3637                 .alg = "xcbc(aes)",
3638                 .test = alg_test_hash,
3639                 .suite = {
3640 -                       .hash = {
3641 -                               .vecs = aes_xcbc128_tv_template,
3642 -                               .count = XCBC_AES_TEST_VECTORS
3643 -                       }
3644 +                       .hash = __VECS(aes_xcbc128_tv_template)
3645                 }
3646         }, {
3647                 .alg = "xts(aes)",
3648 @@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
3649                 .fips_allowed = 1,
3650                 .suite = {
3651                         .cipher = {
3652 -                               .enc = {
3653 -                                       .vecs = aes_xts_enc_tv_template,
3654 -                                       .count = AES_XTS_ENC_TEST_VECTORS
3655 -                               },
3656 -                               .dec = {
3657 -                                       .vecs = aes_xts_dec_tv_template,
3658 -                                       .count = AES_XTS_DEC_TEST_VECTORS
3659 -                               }
3660 +                               .enc = __VECS(aes_xts_enc_tv_template),
3661 +                               .dec = __VECS(aes_xts_dec_tv_template)
3662                         }
3663                 }
3664         }, {
3665 @@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
3666                 .test = alg_test_skcipher,
3667                 .suite = {
3668                         .cipher = {
3669 -                               .enc = {
3670 -                                       .vecs = camellia_xts_enc_tv_template,
3671 -                                       .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3672 -                               },
3673 -                               .dec = {
3674 -                                       .vecs = camellia_xts_dec_tv_template,
3675 -                                       .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3676 -                               }
3677 +                               .enc = __VECS(camellia_xts_enc_tv_template),
3678 +                               .dec = __VECS(camellia_xts_dec_tv_template)
3679                         }
3680                 }
3681         }, {
3682 @@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
3683                 .test = alg_test_skcipher,
3684                 .suite = {
3685                         .cipher = {
3686 -                               .enc = {
3687 -                                       .vecs = cast6_xts_enc_tv_template,
3688 -                                       .count = CAST6_XTS_ENC_TEST_VECTORS
3689 -                               },
3690 -                               .dec = {
3691 -                                       .vecs = cast6_xts_dec_tv_template,
3692 -                                       .count = CAST6_XTS_DEC_TEST_VECTORS
3693 -                               }
3694 +                               .enc = __VECS(cast6_xts_enc_tv_template),
3695 +                               .dec = __VECS(cast6_xts_dec_tv_template)
3696                         }
3697                 }
3698         }, {
3699 @@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
3700                 .test = alg_test_skcipher,
3701                 .suite = {
3702                         .cipher = {
3703 -                               .enc = {
3704 -                                       .vecs = serpent_xts_enc_tv_template,
3705 -                                       .count = SERPENT_XTS_ENC_TEST_VECTORS
3706 -                               },
3707 -                               .dec = {
3708 -                                       .vecs = serpent_xts_dec_tv_template,
3709 -                                       .count = SERPENT_XTS_DEC_TEST_VECTORS
3710 -                               }
3711 +                               .enc = __VECS(serpent_xts_enc_tv_template),
3712 +                               .dec = __VECS(serpent_xts_dec_tv_template)
3713                         }
3714                 }
3715         }, {
3716 @@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
3717                 .test = alg_test_skcipher,
3718                 .suite = {
3719                         .cipher = {
3720 -                               .enc = {
3721 -                                       .vecs = tf_xts_enc_tv_template,
3722 -                                       .count = TF_XTS_ENC_TEST_VECTORS
3723 -                               },
3724 -                               .dec = {
3725 -                                       .vecs = tf_xts_dec_tv_template,
3726 -                                       .count = TF_XTS_DEC_TEST_VECTORS
3727 -                               }
3728 +                               .enc = __VECS(tf_xts_enc_tv_template),
3729 +                               .dec = __VECS(tf_xts_dec_tv_template)
3730                         }
3731                 }
3732         }
3733 --- a/crypto/testmgr.h
3734 +++ b/crypto/testmgr.h
3735 @@ -34,9 +34,9 @@
3736  
3737  struct hash_testvec {
3738         /* only used with keyed hash algorithms */
3739 -       char *key;
3740 -       char *plaintext;
3741 -       char *digest;
3742 +       const char *key;
3743 +       const char *plaintext;
3744 +       const char *digest;
3745         unsigned char tap[MAX_TAP];
3746         unsigned short psize;
3747         unsigned char np;
3748 @@ -63,11 +63,11 @@ struct hash_testvec {
3749   */
3750  
3751  struct cipher_testvec {
3752 -       char *key;
3753 -       char *iv;
3754 -       char *iv_out;
3755 -       char *input;
3756 -       char *result;
3757 +       const char *key;
3758 +       const char *iv;
3759 +       const char *iv_out;
3760 +       const char *input;
3761 +       const char *result;
3762         unsigned short tap[MAX_TAP];
3763         int np;
3764         unsigned char also_non_np;
3765 @@ -80,11 +80,11 @@ struct cipher_testvec {
3766  };
3767  
3768  struct aead_testvec {
3769 -       char *key;
3770 -       char *iv;
3771 -       char *input;
3772 -       char *assoc;
3773 -       char *result;
3774 +       const char *key;
3775 +       const char *iv;
3776 +       const char *input;
3777 +       const char *assoc;
3778 +       const char *result;
3779         unsigned char tap[MAX_TAP];
3780         unsigned char atap[MAX_TAP];
3781         int np;
3782 @@ -99,10 +99,10 @@ struct aead_testvec {
3783  };
3784  
3785  struct cprng_testvec {
3786 -       char *key;
3787 -       char *dt;
3788 -       char *v;
3789 -       char *result;
3790 +       const char *key;
3791 +       const char *dt;
3792 +       const char *v;
3793 +       const char *result;
3794         unsigned char klen;
3795         unsigned short dtlen;
3796         unsigned short vlen;
3797 @@ -111,24 +111,38 @@ struct cprng_testvec {
3798  };
3799  
3800  struct drbg_testvec {
3801 -       unsigned char *entropy;
3802 +       const unsigned char *entropy;
3803         size_t entropylen;
3804 -       unsigned char *entpra;
3805 -       unsigned char *entprb;
3806 +       const unsigned char *entpra;
3807 +       const unsigned char *entprb;
3808         size_t entprlen;
3809 -       unsigned char *addtla;
3810 -       unsigned char *addtlb;
3811 +       const unsigned char *addtla;
3812 +       const unsigned char *addtlb;
3813         size_t addtllen;
3814 -       unsigned char *pers;
3815 +       const unsigned char *pers;
3816         size_t perslen;
3817 -       unsigned char *expected;
3818 +       const unsigned char *expected;
3819         size_t expectedlen;
3820  };
3821  
3822 +struct tls_testvec {
3823 +       char *key;      /* wrapped keys for encryption and authentication */
3824 +       char *iv;       /* initialization vector */
3825 +       char *input;    /* input data */
3826 +       char *assoc;    /* associated data: seq num, type, version, input len */
3827 +       char *result;   /* result data */
3828 +       unsigned char fail;     /* the test failure is expected */
3829 +       unsigned char novrfy;   /* dec verification failure expected */
3830 +       unsigned char klen;     /* key length */
3831 +       unsigned short ilen;    /* input data length */
3832 +       unsigned short alen;    /* associated data length */
3833 +       unsigned short rlen;    /* result length */
3834 +};
3835 +
3836  struct akcipher_testvec {
3837 -       unsigned char *key;
3838 -       unsigned char *m;
3839 -       unsigned char *c;
3840 +       const unsigned char *key;
3841 +       const unsigned char *m;
3842 +       const unsigned char *c;
3843         unsigned int key_len;
3844         unsigned int m_size;
3845         unsigned int c_size;
3846 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3847  };
3848  
3849  struct kpp_testvec {
3850 -       unsigned char *secret;
3851 -       unsigned char *b_public;
3852 -       unsigned char *expected_a_public;
3853 -       unsigned char *expected_ss;
3854 +       const unsigned char *secret;
3855 +       const unsigned char *b_public;
3856 +       const unsigned char *expected_a_public;
3857 +       const unsigned char *expected_ss;
3858         unsigned short secret_size;
3859         unsigned short b_public_size;
3860         unsigned short expected_a_public_size;
3861         unsigned short expected_ss_size;
3862  };
3863  
3864 -static char zeroed_string[48];
3865 +static const char zeroed_string[48];
3866  
3867  /*
3868 - * RSA test vectors. Borrowed from openSSL.
3869 + * TLS1.0 synthetic test vectors
3870   */
3871 -#ifdef CONFIG_CRYPTO_FIPS
3872 -#define RSA_TEST_VECTORS       2
3873 +static struct tls_testvec tls_enc_tv_template[] = {
3874 +       {
3875 +#ifdef __LITTLE_ENDIAN
3876 +               .key    = "\x08\x00"            /* rta length */
3877 +                       "\x01\x00"              /* rta type */
3878 +#else
3879 +               .key    = "\x00\x08"            /* rta length */
3880 +                       "\x00\x01"              /* rta type */
3881 +#endif
3882 +                       "\x00\x00\x00\x10"      /* enc key length */
3883 +                       "authenticationkey20benckeyis16_bytes",
3884 +               .klen   = 8 + 20 + 16,
3885 +               .iv     = "iv0123456789abcd",
3886 +               .input  = "Single block msg",
3887 +               .ilen   = 16,
3888 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3889 +                       "\x00\x03\x01\x00\x10",
3890 +               .alen   = 13,
3891 +               .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3892 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3893 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3894 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3895 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3896 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3897 +               .rlen   = 16 + 20 + 12,
3898 +       }, {
3899 +#ifdef __LITTLE_ENDIAN
3900 +               .key    = "\x08\x00"            /* rta length */
3901 +                       "\x01\x00"              /* rta type */
3902 +#else
3903 +               .key    = "\x00\x08"            /* rta length */
3904 +                       "\x00\x01"              /* rta type */
3905 +#endif
3906 +                       "\x00\x00\x00\x10"      /* enc key length */
3907 +                       "authenticationkey20benckeyis16_bytes",
3908 +               .klen   = 8 + 20 + 16,
3909 +               .iv     = "iv0123456789abcd",
3910 +               .input  = "",
3911 +               .ilen   = 0,
3912 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3913 +                       "\x00\x03\x01\x00\x00",
3914 +               .alen   = 13,
3915 +               .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3916 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3917 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3918 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3919 +               .rlen   = 20 + 12,
3920 +       }, {
3921 +#ifdef __LITTLE_ENDIAN
3922 +               .key    = "\x08\x00"            /* rta length */
3923 +                       "\x01\x00"              /* rta type */
3924 +#else
3925 +               .key    = "\x00\x08"            /* rta length */
3926 +                       "\x00\x01"              /* rta type */
3927 +#endif
3928 +                       "\x00\x00\x00\x10"      /* enc key length */
3929 +                       "authenticationkey20benckeyis16_bytes",
3930 +               .klen   = 8 + 20 + 16,
3931 +               .iv     = "iv0123456789abcd",
3932 +               .input  = "285 bytes plaintext285 bytes plaintext285 bytes"
3933 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3934 +                       " bytes plaintext285 bytes plaintext285 bytes"
3935 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3936 +                       " bytes plaintext285 bytes plaintext285 bytes"
3937 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
3938 +                       " bytes plaintext285 bytes plaintext",
3939 +               .ilen   = 285,
3940 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3941 +                       "\x00\x03\x01\x01\x1d",
3942 +               .alen   = 13,
3943 +               .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3944 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3945 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3946 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3947 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3948 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3949 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3950 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3951 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3952 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3953 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3954 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3955 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3956 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3957 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3958 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3959 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3960 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3961 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3962 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3963 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3964 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3965 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3966 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3967 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3968 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3969 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3970 +               .rlen   = 285 + 20 + 15,
3971 +       }
3972 +};
3973 +
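
The .rlen arithmetic above follows TLS 1.0 MAC-then-pad-then-encrypt: the ciphertext carries the plaintext, a 20-byte HMAC-SHA1 tag, and CBC padding up to the 16-byte AES block size, so 285 + 20 = 305 is padded with 15 bytes to 320. As a one-line sketch (not part of the patch):

	/* rlen = ilen + tag + padding; yields 16+20+12, 0+20+12, 285+20+15 */
	static inline unsigned short tls_rlen(unsigned short ilen)
	{
		return ilen + 20 + (16 - (ilen + 20) % 16);
	}
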
3974 +static struct tls_testvec tls_dec_tv_template[] = {
3975 +       {
3976 +#ifdef __LITTLE_ENDIAN
3977 +               .key    = "\x08\x00"            /* rta length */
3978 +                       "\x01\x00"              /* rta type */
3979 +#else
3980 +               .key    = "\x00\x08"            /* rta length */
3981 +                       "\x00\x01"              /* rta type */
3982 +#endif
3983 +                       "\x00\x00\x00\x10"      /* enc key length */
3984 +                       "authenticationkey20benckeyis16_bytes",
3985 +               .klen   = 8 + 20 + 16,
3986 +               .iv     = "iv0123456789abcd",
3987 +               .input  = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3988 +                       "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3989 +                       "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3990 +                       "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3991 +                       "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3992 +                       "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3993 +               .ilen   = 16 + 20 + 12,
3994 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
3995 +                       "\x00\x03\x01\x00\x30",
3996 +               .alen   = 13,
3997 +               .result = "Single block msg",
3998 +               .rlen   = 16,
3999 +       }, {
4000 +#ifdef __LITTLE_ENDIAN
4001 +               .key    = "\x08\x00"            /* rta length */
4002 +                       "\x01\x00"              /* rta type */
4003  #else
4004 -#define RSA_TEST_VECTORS       5
4005 +               .key    = "\x00\x08"            /* rta length */
4006 +                       "\x00\x01"              /* rta type */
4007  #endif
4008 -static struct akcipher_testvec rsa_tv_template[] = {
4009 +                       "\x00\x00\x00\x10"      /* enc key length */
4010 +                       "authenticationkey20benckeyis16_bytes",
4011 +               .klen   = 8 + 20 + 16,
4012 +               .iv     = "iv0123456789abcd",
4013 +               .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4014 +                       "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4015 +                       "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4016 +                       "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4017 +               .ilen   = 20 + 12,
4018 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
4019 +                       "\x00\x03\x01\x00\x20",
4020 +               .alen   = 13,
4021 +               .result = "",
4022 +               .rlen   = 0,
4023 +       }, {
4024 +#ifdef __LITTLE_ENDIAN
4025 +               .key    = "\x08\x00"            /* rta length */
4026 +                       "\x01\x00"              /* rta type */
4027 +#else
4028 +               .key    = "\x00\x08"            /* rta length */
4029 +                       "\x00\x01"              /* rta type */
4030 +#endif
4031 +                       "\x00\x00\x00\x10"      /* enc key length */
4032 +                       "authenticationkey20benckeyis16_bytes",
4033 +               .klen   = 8 + 20 + 16,
4034 +               .iv     = "iv0123456789abcd",
4035 +               .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4036 +                       "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4037 +                       "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4038 +                       "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4039 +                       "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4040 +                       "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4041 +                       "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4042 +                       "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4043 +                       "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4044 +                       "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4045 +                       "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4046 +                       "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4047 +                       "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4048 +                       "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4049 +                       "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4050 +                       "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4051 +                       "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4052 +                       "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4053 +                       "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4054 +                       "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4055 +                       "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4056 +                       "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4057 +                       "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4058 +                       "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4059 +                       "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4060 +                       "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4061 +                       "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4062 +
4063 +               .ilen   = 285 + 20 + 15,
4064 +               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
4065 +                       "\x00\x03\x01\x01\x40",
4066 +               .alen   = 13,
4067 +               .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4068 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4069 +                       " bytes plaintext285 bytes plaintext285 bytes"
4070 +                       " plaintext285 bytes plaintext285 bytes plaintext285"
4071 +                       " bytes plaintext285 bytes plaintext285 bytes"
4072 +                       " plaintext285 bytes plaintext285 bytes plaintext",
4073 +               .rlen   = 285,
4074 +       }
4075 +};
4076 +
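
In both templates the 13-byte .assoc is the TLS 1.0 MAC header: an 8-byte sequence number, a record type octet, the 0x0301 protocol version, and a 2-byte length that covers the plaintext on encryption (0x0010, 0x0000, 0x011d above) but the full ciphertext on decryption (0x0030, 0x0020, 0x0140). A sketch of that layout (the struct is illustrative only):

	struct tls_aad {
		__be64 seq;	/* 00 01 02 03 04 05 06 07 here */
		u8 type;	/* 0x00 in these synthetic vectors */
		__be16 version;	/* 0x0301: TLS 1.0 */
		__be16 len;	/* plaintext (enc) or ciphertext (dec) length */
	} __packed;		/* 13 bytes == .alen */
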
4077 +/*
4078 + * RSA test vectors. Borrowed from OpenSSL.
4079 + */
4080 +static const struct akcipher_testvec rsa_tv_template[] = {
4081         {
4082  #ifndef CONFIG_CRYPTO_FIPS
4083         .key =
4084 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4085         .m_size = 8,
4086         .c_size = 256,
4087         .public_key_vec = true,
4088 +#ifndef CONFIG_CRYPTO_FIPS
4089         }, {
4090         .key =
4091         "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4092 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4093         .key_len = 2349,
4094         .m_size = 8,
4095         .c_size = 512,
4096 +#endif
4097         }
4098  };
4099  
4100 -#define DH_TEST_VECTORS 2
4101 -
4102 -struct kpp_testvec dh_tv_template[] = {
4103 +static const struct kpp_testvec dh_tv_template[] = {
4104         {
4105         .secret =
4106  #ifdef __LITTLE_ENDIAN
4107 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4108         }
4109  };
4110  
4111 -#ifdef CONFIG_CRYPTO_FIPS
4112 -#define ECDH_TEST_VECTORS 1
4113 -#else
4114 -#define ECDH_TEST_VECTORS 2
4115 -#endif
4116 -struct kpp_testvec ecdh_tv_template[] = {
4117 +static const struct kpp_testvec ecdh_tv_template[] = {
4118         {
4119  #ifndef CONFIG_CRYPTO_FIPS
4120         .secret =
4121 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4122  /*
4123   * MD4 test vectors from RFC1320
4124   */
4125 -#define MD4_TEST_VECTORS       7
4126 -
4127 -static struct hash_testvec md4_tv_template [] = {
4128 +static const struct hash_testvec md4_tv_template[] = {
4129         {
4130                 .plaintext = "",
4131                 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4132 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4133         },
4134  };
4135  
4136 -#define SHA3_224_TEST_VECTORS  3
4137 -static struct hash_testvec sha3_224_tv_template[] = {
4138 +static const struct hash_testvec sha3_224_tv_template[] = {
4139         {
4140                 .plaintext = "",
4141                 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4142 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4143         },
4144  };
4145  
4146 -#define SHA3_256_TEST_VECTORS  3
4147 -static struct hash_testvec sha3_256_tv_template[] = {
4148 +static const struct hash_testvec sha3_256_tv_template[] = {
4149         {
4150                 .plaintext = "",
4151                 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4152 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4153  };
4154  
4155  
4156 -#define SHA3_384_TEST_VECTORS  3
4157 -static struct hash_testvec sha3_384_tv_template[] = {
4158 +static const struct hash_testvec sha3_384_tv_template[] = {
4159         {
4160                 .plaintext = "",
4161                 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4162 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4163  };
4164  
4165  
4166 -#define SHA3_512_TEST_VECTORS  3
4167 -static struct hash_testvec sha3_512_tv_template[] = {
4168 +static const struct hash_testvec sha3_512_tv_template[] = {
4169         {
4170                 .plaintext = "",
4171                 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4172 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4173  /*
4174   * MD5 test vectors from RFC1321
4175   */
4176 -#define MD5_TEST_VECTORS       7
4177 -
4178 -static struct hash_testvec md5_tv_template[] = {
4179 +static const struct hash_testvec md5_tv_template[] = {
4180         {
4181                 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4182                           "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4183 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4184  /*
4185   * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4186   */
4187 -#define RMD128_TEST_VECTORS     10
4188 -
4189 -static struct hash_testvec rmd128_tv_template[] = {
4190 +static const struct hash_testvec rmd128_tv_template[] = {
4191         {
4192                 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4193                           "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4194 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4195  /*
4196   * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4197   */
4198 -#define RMD160_TEST_VECTORS     10
4199 -
4200 -static struct hash_testvec rmd160_tv_template[] = {
4201 +static const struct hash_testvec rmd160_tv_template[] = {
4202         {
4203                 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4204                           "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4205 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4206  /*
4207   * RIPEMD-256 test vectors
4208   */
4209 -#define RMD256_TEST_VECTORS     8
4210 -
4211 -static struct hash_testvec rmd256_tv_template[] = {
4212 +static const struct hash_testvec rmd256_tv_template[] = {
4213         {
4214                 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4215                           "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4216 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4217  /*
4218   * RIPEMD-320 test vectors
4219   */
4220 -#define RMD320_TEST_VECTORS     8
4221 -
4222 -static struct hash_testvec rmd320_tv_template[] = {
4223 +static const struct hash_testvec rmd320_tv_template[] = {
4224         {
4225                 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4226                           "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4227 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4228         }
4229  };
4230  
4231 -#define CRCT10DIF_TEST_VECTORS 3
4232 -static struct hash_testvec crct10dif_tv_template[] = {
4233 +static const struct hash_testvec crct10dif_tv_template[] = {
4234         {
4235 -               .plaintext = "abc",
4236 -               .psize  = 3,
4237 -#ifdef __LITTLE_ENDIAN
4238 -               .digest = "\x3b\x44",
4239 -#else
4240 -               .digest = "\x44\x3b",
4241 -#endif
4242 -       }, {
4243 -               .plaintext = "1234567890123456789012345678901234567890"
4244 -                            "123456789012345678901234567890123456789",
4245 -               .psize  = 79,
4246 -#ifdef __LITTLE_ENDIAN
4247 -               .digest = "\x70\x4b",
4248 -#else
4249 -               .digest = "\x4b\x70",
4250 -#endif
4251 -       }, {
4252 -               .plaintext =
4253 -               "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4254 -               .psize  = 56,
4255 -#ifdef __LITTLE_ENDIAN
4256 -               .digest = "\xe3\x9c",
4257 -#else
4258 -               .digest = "\x9c\xe3",
4259 -#endif
4260 -               .np     = 2,
4261 -               .tap    = { 28, 28 }
4262 +               .plaintext      = "abc",
4263 +               .psize          = 3,
4264 +               .digest         = (u8 *)(u16 []){ 0x443b },
4265 +       }, {
4266 +               .plaintext      = "1234567890123456789012345678901234567890"
4267 +                                 "123456789012345678901234567890123456789",
4268 +               .psize          = 79,
4269 +               .digest         = (u8 *)(u16 []){ 0x4b70 },
4270 +               .np             = 2,
4271 +               .tap            = { 63, 16 },
4272 +       }, {
4273 +               .plaintext      = "abcdddddddddddddddddddddddddddddddddddddddd"
4274 +                                 "ddddddddddddd",
4275 +               .psize          = 56,
4276 +               .digest         = (u8 *)(u16 []){ 0x9ce3 },
4277 +               .np             = 8,
4278 +               .tap            = { 1, 2, 28, 7, 6, 5, 4, 3 },
4279 +       }, {
4280 +               .plaintext      = "1234567890123456789012345678901234567890"
4281 +                                 "1234567890123456789012345678901234567890"
4282 +                                 "1234567890123456789012345678901234567890"
4283 +                                 "1234567890123456789012345678901234567890"
4284 +                                 "1234567890123456789012345678901234567890"
4285 +                                 "1234567890123456789012345678901234567890"
4286 +                                 "1234567890123456789012345678901234567890"
4287 +                                 "123456789012345678901234567890123456789",
4288 +               .psize          = 319,
4289 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4290 +       }, {
4291 +               .plaintext      = "1234567890123456789012345678901234567890"
4292 +                                 "1234567890123456789012345678901234567890"
4293 +                                 "1234567890123456789012345678901234567890"
4294 +                                 "1234567890123456789012345678901234567890"
4295 +                                 "1234567890123456789012345678901234567890"
4296 +                                 "1234567890123456789012345678901234567890"
4297 +                                 "1234567890123456789012345678901234567890"
4298 +                                 "123456789012345678901234567890123456789",
4299 +               .psize          = 319,
4300 +               .digest         = (u8 *)(u16 []){ 0x44c6 },
4301 +               .np             = 4,
4302 +               .tap            = { 1, 255, 57, 6 },
4303         }
4304  };
4305  
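
The compound-literal digests above, e.g. (u8 *)(u16 []){ 0x443b }, store the 16-bit CRC in native byte order, which is what lets the old __LITTLE_ENDIAN/#else digest pairs go away. The values can be reproduced with the kernel's CRC-T10DIF helper; a minimal check of the first vector (illustrative, not part of the patch):

	#include <linux/crc-t10dif.h>

	static void crct10dif_example(void)
	{
		static const unsigned char msg[] = "abc";
		__u16 crc = crc_t10dif(msg, 3);	/* expected: 0x443b */

		(void)crc;
	}
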
4306 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4307   * SHA1 test vectors from FIPS PUB 180-1
4308   * Long vector from CAVS 5.0
4309   */
4310 -#define SHA1_TEST_VECTORS      6
4311 -
4312 -static struct hash_testvec sha1_tv_template[] = {
4313 +static const struct hash_testvec sha1_tv_template[] = {
4314         {
4315                 .plaintext = "",
4316                 .psize  = 0,
4317 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4318  /*
4319   * SHA224 test vectors from FIPS PUB 180-2
4320   */
4321 -#define SHA224_TEST_VECTORS     5
4322 -
4323 -static struct hash_testvec sha224_tv_template[] = {
4324 +static const struct hash_testvec sha224_tv_template[] = {
4325         {
4326                 .plaintext = "",
4327                 .psize  = 0,
4328 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4329  /*
4330   * SHA256 test vectors from NIST
4331   */
4332 -#define SHA256_TEST_VECTORS    5
4333 -
4334 -static struct hash_testvec sha256_tv_template[] = {
4335 +static const struct hash_testvec sha256_tv_template[] = {
4336         {
4337                 .plaintext = "",
4338                 .psize  = 0,
4339 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4340  /*
4341   * SHA384 test vectors from NIST and kerneli
4342   */
4343 -#define SHA384_TEST_VECTORS    6
4344 -
4345 -static struct hash_testvec sha384_tv_template[] = {
4346 +static const struct hash_testvec sha384_tv_template[] = {
4347         {
4348                 .plaintext = "",
4349                 .psize  = 0,
4350 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4351  /*
4352   * SHA512 test vectors from NIST and kerneli
4353   */
4354 -#define SHA512_TEST_VECTORS    6
4355 -
4356 -static struct hash_testvec sha512_tv_template[] = {
4357 +static const struct hash_testvec sha512_tv_template[] = {
4358         {
4359                 .plaintext = "",
4360                 .psize  = 0,
4361 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4362   * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4363   * submission
4364   */
4365 -#define WP512_TEST_VECTORS     8
4366 -
4367 -static struct hash_testvec wp512_tv_template[] = {
4368 +static const struct hash_testvec wp512_tv_template[] = {
4369         {
4370                 .plaintext = "",
4371                 .psize  = 0,
4372 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4373         },
4374  };
4375  
4376 -#define WP384_TEST_VECTORS     8
4377 -
4378 -static struct hash_testvec wp384_tv_template[] = {
4379 +static const struct hash_testvec wp384_tv_template[] = {
4380         {
4381                 .plaintext = "",
4382                 .psize  = 0,
4383 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4384         },
4385  };
4386  
4387 -#define WP256_TEST_VECTORS     8
4388 -
4389 -static struct hash_testvec wp256_tv_template[] = {
4390 +static const struct hash_testvec wp256_tv_template[] = {
4391         {
4392                 .plaintext = "",
4393                 .psize  = 0,
4394 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4395  /*
4396   * TIGER test vectors from Tiger website
4397   */
4398 -#define TGR192_TEST_VECTORS    6
4399 -
4400 -static struct hash_testvec tgr192_tv_template[] = {
4401 +static const struct hash_testvec tgr192_tv_template[] = {
4402         {
4403                 .plaintext = "",
4404                 .psize  = 0,
4405 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4406         },
4407  };
4408  
4409 -#define TGR160_TEST_VECTORS    6
4410 -
4411 -static struct hash_testvec tgr160_tv_template[] = {
4412 +static const struct hash_testvec tgr160_tv_template[] = {
4413         {
4414                 .plaintext = "",
4415                 .psize  = 0,
4416 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4417         },
4418  };
4419  
4420 -#define TGR128_TEST_VECTORS    6
4421 -
4422 -static struct hash_testvec tgr128_tv_template[] = {
4423 +static const struct hash_testvec tgr128_tv_template[] = {
4424         {
4425                 .plaintext = "",
4426                 .psize  = 0,
4427 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4428         },
4429  };
4430  
4431 -#define GHASH_TEST_VECTORS 6
4432 -
4433 -static struct hash_testvec ghash_tv_template[] =
4434 +static const struct hash_testvec ghash_tv_template[] =
4435  {
4436         {
4437                 .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4438 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4439   * HMAC-MD5 test vectors from RFC2202
4440   * (These need to be fixed to not use strlen).
4441   */
4442 -#define HMAC_MD5_TEST_VECTORS  7
4443 -
4444 -static struct hash_testvec hmac_md5_tv_template[] =
4445 +static const struct hash_testvec hmac_md5_tv_template[] =
4446  {
4447         {
4448                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4449 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4450  /*
4451   * HMAC-RIPEMD128 test vectors from RFC2286
4452   */
4453 -#define HMAC_RMD128_TEST_VECTORS       7
4454 -
4455 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4456 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4457         {
4458                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4459                 .ksize  = 16,
4460 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4461  /*
4462   * HMAC-RIPEMD160 test vectors from RFC2286
4463   */
4464 -#define HMAC_RMD160_TEST_VECTORS       7
4465 -
4466 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4467 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4468         {
4469                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4470                 .ksize  = 20,
4471 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4472  /*
4473   * HMAC-SHA1 test vectors from RFC2202
4474   */
4475 -#define HMAC_SHA1_TEST_VECTORS 7
4476 -
4477 -static struct hash_testvec hmac_sha1_tv_template[] = {
4478 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4479         {
4480                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4481                 .ksize  = 20,
4482 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4483  /*
4484   * SHA224 HMAC test vectors from RFC4231
4485   */
4486 -#define HMAC_SHA224_TEST_VECTORS    4
4487 -
4488 -static struct hash_testvec hmac_sha224_tv_template[] = {
4489 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4490         {
4491                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4492                         "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4493 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4494   * HMAC-SHA256 test vectors from
4495   * draft-ietf-ipsec-ciph-sha-256-01.txt
4496   */
4497 -#define HMAC_SHA256_TEST_VECTORS       10
4498 -
4499 -static struct hash_testvec hmac_sha256_tv_template[] = {
4500 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4501         {
4502                 .key    = "\x01\x02\x03\x04\x05\x06\x07\x08"
4503                           "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4504 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4505         },
4506  };
4507  
4508 -#define CMAC_AES_TEST_VECTORS 6
4509 -
4510 -static struct hash_testvec aes_cmac128_tv_template[] = {
4511 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4512         { /* From NIST Special Publication 800-38B, AES-128 */
4513                 .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4514                                   "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4515 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4516         }
4517  };
4518  
4519 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4520 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4521 +       {
4522 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4523 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4524 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4525 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4526 +               .digest         = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4527 +                                 "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4528 +               .psize          = 16,
4529 +               .ksize          = 16,
4530 +       }, {
4531 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4532 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4533 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4534 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4535 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4536 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4537 +                                 "\x30",
4538 +               .digest         = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4539 +                                 "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4540 +               .psize          = 33,
4541 +               .ksize          = 16,
4542 +               .np             = 2,
4543 +               .tap            = { 7, 26 },
4544 +       }, {
4545 +               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4546 +                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4547 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4548 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4549 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4550 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4551 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4552 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4553 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4554 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37",
4555 +               .digest         = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4556 +                                 "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4557 +               .psize          = 63,
4558 +               .ksize          = 16,
4559 +       }, {
4560 +               .key            = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4561 +                                 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4562 +                                 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4563 +                                 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4564 +               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4565 +                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4566 +                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4567 +                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4568 +                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4569 +                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4570 +                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4571 +                                 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4572 +                                 "\x1c",
4573 +               .digest         = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4574 +                                 "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4575 +               .psize          = 65,
4576 +               .ksize          = 32,
4577 +       }
4578 +};
4579  
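
Because CBC-MAC starts from an all-zero IV, the MAC of a single 16-byte block is just the raw AES encryption of that block; that is why the first digest above equals the well-known NIST SP 800-38A AES-128 ciphertext 3ad77bb40d7a3660a89ecaf32466ef97. A usage sketch against the "cbcmac(aes)" shash these vectors exercise (assuming such an implementation is registered; not part of the patch):

	#include <linux/err.h>
	#include <crypto/hash.h>

	static int cbcmac_selfcheck(void)
	{
		static const u8 key[16] = {
			0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
			0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
		static const u8 msg[16] = {
			0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
			0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a };
		u8 mac[16];
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("cbcmac(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, sizeof(key));
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			/* mac should match the first .digest above */
			err = crypto_shash_digest(desc, msg, sizeof(msg), mac);
		}
		crypto_free_shash(tfm);
		return err;
	}
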
4580 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4581 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4582  /*
4583   * From NIST Special Publication 800-38B, Three Key TDEA
4584   * Corrected test vectors from:
4585 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4586         }
4587  };
4588  
4589 -#define XCBC_AES_TEST_VECTORS 6
4590 -
4591 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4592 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4593         {
4594                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4595                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4596 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4597         }
4598  };
4599  
4600 -#define VMAC_AES_TEST_VECTORS  11
4601 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4602 -                               '\x02', '\x03', '\x02', '\x02',
4603 -                               '\x02', '\x04', '\x01', '\x07',
4604 -                               '\x04', '\x01', '\x04', '\x03',};
4605 -static char vmac_string2[128] = {'a', 'b', 'c',};
4606 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4607 -                               'a', 'b', 'c', 'a', 'b', 'c',
4608 -                               'a', 'b', 'c', 'a', 'b', 'c',
4609 -                               'a', 'b', 'c', 'a', 'b', 'c',
4610 -                               'a', 'b', 'c', 'a', 'b', 'c',
4611 -                               'a', 'b', 'c', 'a', 'b', 'c',
4612 -                               'a', 'b', 'c', 'a', 'b', 'c',
4613 -                               'a', 'b', 'c', 'a', 'b', 'c',
4614 -                               };
4615 -
4616 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4617 -                               'i', 'j', 'l', 'm',
4618 -                               'o', 'p', 'r', 's',
4619 -                               't', 'u', 'w', 'x', 'z'};
4620 -
4621 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4622 -                                'o', 'l', 'k', ']', '%',
4623 -                                '9', '2', '7', '!', 'A'};
4624 -
4625 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4626 -                                'i', '!', '#', 'w', '0',
4627 -                                'z', '/', '4', 'A', 'n'};
4628 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4629 +                                      '\x02', '\x03', '\x02', '\x02',
4630 +                                      '\x02', '\x04', '\x01', '\x07',
4631 +                                      '\x04', '\x01', '\x04', '\x03',};
4632 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4633 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4634 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4635 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4636 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4637 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4638 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4639 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4640 +                                      'a', 'b', 'c', 'a', 'b', 'c',
4641 +                                     };
4642 +
4643 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4644 +                                     'i', 'j', 'l', 'm',
4645 +                                     'o', 'p', 'r', 's',
4646 +                                     't', 'u', 'w', 'x', 'z'};
4647 +
4648 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4649 +                                      'o', 'l', 'k', ']', '%',
4650 +                                      '9', '2', '7', '!', 'A'};
4651 +
4652 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4653 +                                      'i', '!', '#', 'w', '0',
4654 +                                      'z', '/', '4', 'A', 'n'};
4655  
4656 -static struct hash_testvec aes_vmac128_tv_template[] = {
4657 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4658         {
4659                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
4660                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4661 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4662   * SHA384 HMAC test vectors from RFC4231
4663   */
4664  
4665 -#define HMAC_SHA384_TEST_VECTORS       4
4666 -
4667 -static struct hash_testvec hmac_sha384_tv_template[] = {
4668 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4669         {
4670                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4671                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4672 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4673   * SHA512 HMAC test vectors from RFC4231
4674   */
4675  
4676 -#define HMAC_SHA512_TEST_VECTORS       4
4677 -
4678 -static struct hash_testvec hmac_sha512_tv_template[] = {
4679 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4680         {
4681                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4682                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4683 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4684         },
4685  };
4686  
4687 -#define HMAC_SHA3_224_TEST_VECTORS     4
4688 -
4689 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4690 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4691         {
4692                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4693                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4694 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4695         },
4696  };
4697  
4698 -#define HMAC_SHA3_256_TEST_VECTORS     4
4699 -
4700 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4701 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4702         {
4703                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4704                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4705 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4706         },
4707  };
4708  
4709 -#define HMAC_SHA3_384_TEST_VECTORS     4
4710 -
4711 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4712 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4713         {
4714                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4715                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4716 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4717         },
4718  };
4719  
4720 -#define HMAC_SHA3_512_TEST_VECTORS     4
4721 -
4722 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4723 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4724         {
4725                 .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4726                           "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4727 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4728   * Poly1305 test vectors from RFC7539 A.3.
4729   */
4730  
4731 -#define POLY1305_TEST_VECTORS  11
4732 -
4733 -static struct hash_testvec poly1305_tv_template[] = {
4734 +static const struct hash_testvec poly1305_tv_template[] = {
4735         { /* Test Vector #1 */
4736                 .plaintext      = "\x00\x00\x00\x00\x00\x00\x00\x00"
4737                                   "\x00\x00\x00\x00\x00\x00\x00\x00"
4738 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t
4739  /*
4740   * DES test vectors.
4741   */
4742 -#define DES_ENC_TEST_VECTORS           11
4743 -#define DES_DEC_TEST_VECTORS           5
4744 -#define DES_CBC_ENC_TEST_VECTORS       6
4745 -#define DES_CBC_DEC_TEST_VECTORS       5
4746 -#define DES_CTR_ENC_TEST_VECTORS       2
4747 -#define DES_CTR_DEC_TEST_VECTORS       2
4748 -#define DES3_EDE_ENC_TEST_VECTORS      4
4749 -#define DES3_EDE_DEC_TEST_VECTORS      4
4750 -#define DES3_EDE_CBC_ENC_TEST_VECTORS  2
4751 -#define DES3_EDE_CBC_DEC_TEST_VECTORS  2
4752 -#define DES3_EDE_CTR_ENC_TEST_VECTORS  2
4753 -#define DES3_EDE_CTR_DEC_TEST_VECTORS  2
4754 -
4755 -static struct cipher_testvec des_enc_tv_template[] = {
4756 +static const struct cipher_testvec des_enc_tv_template[] = {
4757         { /* From Applied Cryptography */
4758                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4759                 .klen   = 8,
4760 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_
4761         },
4762  };
4763  
4764 -static struct cipher_testvec des_dec_tv_template[] = {
4765 +static const struct cipher_testvec des_dec_tv_template[] = {
4766         { /* From Applied Cryptography */
4767                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4768                 .klen   = 8,
4769 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_
4770         },
4771  };
4772  
4773 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4774 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4775         { /* From OpenSSL */
4776                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4777                 .klen   = 8,
4778 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc
4779         },
4780  };
4781  
4782 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4783 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4784         { /* FIPS Pub 81 */
4785                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4786                 .klen   = 8,
4787 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec
4788         },
4789  };
4790  
4791 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4792 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4793         { /* Generated with Crypto++ */
4794                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4795                 .klen   = 8,
4796 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc
4797         },
4798  };
4799  
4800 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4801 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4802         { /* Generated with Crypto++ */
4803                 .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4804                 .klen   = 8,
4805 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec
4806         },
4807  };
4808  
4809 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4810 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4811         { /* These are from openssl */
4812                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4813                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4814 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en
4815         },
4816  };
4817  
4818 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4819 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4820         { /* These are from openssl */
4821                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4822                           "\x55\x55\x55\x55\x55\x55\x55\x55"
4823 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de
4824         },
4825  };
4826  
4827 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4828 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4829         { /* Generated from openssl */
4830                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4831                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4832 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb
4833         },
4834  };
4835  
4836 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4837 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4838         { /* Generated from openssl */
4839                 .key    = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4840                           "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4841 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb
4842         },
4843  };
4844  
4845 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4846 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4847         { /* Generated with Crypto++ */
4848                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4849                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4850 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct
4851         },
4852  };
4853  
4854 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4855 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4856         { /* Generated with Crypto++ */
4857                 .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4858                           "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4859 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct
4860  /*
4861   * Blowfish test vectors.
4862   */
4863 -#define BF_ENC_TEST_VECTORS    7
4864 -#define BF_DEC_TEST_VECTORS    7
4865 -#define BF_CBC_ENC_TEST_VECTORS        2
4866 -#define BF_CBC_DEC_TEST_VECTORS        2
4867 -#define BF_CTR_ENC_TEST_VECTORS        2
4868 -#define BF_CTR_DEC_TEST_VECTORS        2
4869 -
4870 -static struct cipher_testvec bf_enc_tv_template[] = {
4871 +static const struct cipher_testvec bf_enc_tv_template[] = {
4872         { /* DES test vectors from OpenSSL */
4873                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4874                 .klen   = 8,
4875 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t
4876         },
4877  };
4878  
4879 -static struct cipher_testvec bf_dec_tv_template[] = {
4880 +static const struct cipher_testvec bf_dec_tv_template[] = {
4881         { /* DES test vectors from OpenSSL */
4882                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
4883                 .klen   = 8,
4884 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t
4885         },
4886  };
4887  
4888 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4889 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4890         { /* From OpenSSL */
4891                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4892                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4893 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_
4894         },
4895  };
4896  
4897 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4898 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4899         { /* From OpenSSL */
4900                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4901                           "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4902 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_
4903         },
4904  };
4905  
4906 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4907 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4908         { /* Generated with Crypto++ */
4909                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4910                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4911 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_
4912         },
4913  };
4914  
4915 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4916 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4917         { /* Generated with Crypto++ */
4918                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4919                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4920 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_
4921  /*
4922   * Twofish test vectors.
4923   */
4924 -#define TF_ENC_TEST_VECTORS            4
4925 -#define TF_DEC_TEST_VECTORS            4
4926 -#define TF_CBC_ENC_TEST_VECTORS                5
4927 -#define TF_CBC_DEC_TEST_VECTORS                5
4928 -#define TF_CTR_ENC_TEST_VECTORS                2
4929 -#define TF_CTR_DEC_TEST_VECTORS                2
4930 -#define TF_LRW_ENC_TEST_VECTORS                8
4931 -#define TF_LRW_DEC_TEST_VECTORS                8
4932 -#define TF_XTS_ENC_TEST_VECTORS                5
4933 -#define TF_XTS_DEC_TEST_VECTORS                5
4934 -
4935 -static struct cipher_testvec tf_enc_tv_template[] = {
4936 +static const struct cipher_testvec tf_enc_tv_template[] = {
4937         {
4938                 .key    = zeroed_string,
4939                 .klen   = 16,
4940 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t
4941         },
4942  };
4943  
4944 -static struct cipher_testvec tf_dec_tv_template[] = {
4945 +static const struct cipher_testvec tf_dec_tv_template[] = {
4946         {
4947                 .key    = zeroed_string,
4948                 .klen   = 16,
4949 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t
4950         },
4951  };
4952  
4953 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4954 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4955         { /* Generated with Nettle */
4956                 .key    = zeroed_string,
4957                 .klen   = 16,
4958 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_
4959         },
4960  };
4961  
4962 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4963 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4964         { /* Reverse of the first four above */
4965                 .key    = zeroed_string,
4966                 .klen   = 16,
4967 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_
4968         },
4969  };
4970  
4971 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4972 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4973         { /* Generated with Crypto++ */
4974                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4975                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4976 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_
4977         },
4978  };
4979  
4980 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4981 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4982         { /* Generated with Crypto++ */
4983                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4984                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4985 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_
4986         },
4987  };
4988  
4989 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4990 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4991         /* Generated from AES-LRW test vectors */
4992         {
4993                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4994 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_
4995         },
4996  };
4997  
4998 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4999 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
5000         /* Generated from AES-LRW test vectors */
5001         /* same as enc vectors with input and result reversed */
5002         {
5003 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_
5004         },
5005  };
5006  
5007 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5008 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5009         /* Generated from AES-XTS test vectors */
5010  {
5011                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5012 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_
5013         },
5014  };
5015  
5016 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5017 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5018         /* Generated from AES-XTS test vectors */
5019         /* same as enc vectors with input and result reversed */
5020         {
5021 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_
5022   * Serpent test vectors.  These are backwards because Serpent writes
5023   * octet sequences in right-to-left mode.
5024   */
5025 -#define SERPENT_ENC_TEST_VECTORS       5
5026 -#define SERPENT_DEC_TEST_VECTORS       5
5027 -
5028 -#define TNEPRES_ENC_TEST_VECTORS       4
5029 -#define TNEPRES_DEC_TEST_VECTORS       4
5030 -
5031 -#define SERPENT_CBC_ENC_TEST_VECTORS   1
5032 -#define SERPENT_CBC_DEC_TEST_VECTORS   1
5033 -
5034 -#define SERPENT_CTR_ENC_TEST_VECTORS   2
5035 -#define SERPENT_CTR_DEC_TEST_VECTORS   2
5036 -
5037 -#define SERPENT_LRW_ENC_TEST_VECTORS   8
5038 -#define SERPENT_LRW_DEC_TEST_VECTORS   8
5039 -
5040 -#define SERPENT_XTS_ENC_TEST_VECTORS   5
5041 -#define SERPENT_XTS_DEC_TEST_VECTORS   5
5042 -
5043 -static struct cipher_testvec serpent_enc_tv_template[] = {
5044 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5045         {
5046                 .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
5047                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5048 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc
5049         },
5050  };
5051  
5052 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5053 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5054         { /* KeySize=128, PT=0, I=1 */
5055                 .input  = "\x00\x00\x00\x00\x00\x00\x00\x00"
5056                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5057 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc
5058  };
5059  
5060  
5061 -static struct cipher_testvec serpent_dec_tv_template[] = {
5062 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5063         {
5064                 .input  = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5065                           "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5066 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec
5067         },
5068  };
5069  
5070 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5071 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5072         {
5073                 .input  = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5074                           "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5075 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec
5076         },
5077  };
5078  
5079 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5080 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5081         { /* Generated with Crypto++ */
5082                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5083                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5084 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc
5085         },
5086  };
5087  
5088 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5089 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5090         { /* Generated with Crypto++ */
5091                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5092                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5093 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc
5094         },
5095  };
5096  
5097 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5098 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5099         { /* Generated with Crypto++ */
5100                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5101                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5102 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr
5103         },
5104  };
5105  
5106 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5107 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5108         { /* Generated with Crypto++ */
5109                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5110                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5111 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr
5112         },
5113  };
5114  
5115 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5116 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5117         /* Generated from AES-LRW test vectors */
5118         {
5119                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5120 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw
5121         },
5122  };
5123  
5124 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5125 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5126         /* Generated from AES-LRW test vectors */
5127         /* same as enc vectors with input and result reversed */
5128         {
5129 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw
5130         },
5131  };
5132  
5133 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5134 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5135         /* Generated from AES-XTS test vectors */
5136         {
5137                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5138 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts
5139         },
5140  };
5141  
5142 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5143 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5144         /* Generated from AES-XTS test vectors */
5145         /* same as enc vectors with input and result reversed */
5146         {
5147 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts
5148  };
5149  
5150  /* Cast6 test vectors from RFC 2612 */
5151 -#define CAST6_ENC_TEST_VECTORS         4
5152 -#define CAST6_DEC_TEST_VECTORS         4
5153 -#define CAST6_CBC_ENC_TEST_VECTORS     1
5154 -#define CAST6_CBC_DEC_TEST_VECTORS     1
5155 -#define CAST6_CTR_ENC_TEST_VECTORS     2
5156 -#define CAST6_CTR_DEC_TEST_VECTORS     2
5157 -#define CAST6_LRW_ENC_TEST_VECTORS     1
5158 -#define CAST6_LRW_DEC_TEST_VECTORS     1
5159 -#define CAST6_XTS_ENC_TEST_VECTORS     1
5160 -#define CAST6_XTS_DEC_TEST_VECTORS     1
5161 -
5162 -static struct cipher_testvec cast6_enc_tv_template[] = {
5163 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5164         {
5165                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5166                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5167 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t
5168         },
5169  };
5170  
5171 -static struct cipher_testvec cast6_dec_tv_template[] = {
5172 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5173         {
5174                 .key    = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5175                           "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5176 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t
5177         },
5178  };
5179  
5180 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5181 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5182         { /* Generated from TF test vectors */
5183                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5184                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5185 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e
5186         },
5187  };
5188  
5189 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5190 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5191         { /* Generated from TF test vectors */
5192                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5193                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5194 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d
5195         },
5196  };
5197  
5198 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5199 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5200         { /* Generated from TF test vectors */
5201                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5202                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5203 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e
5204         },
5205  };
5206  
5207 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5208 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5209         { /* Generated from TF test vectors */
5210                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5211                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5212 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d
5213         },
5214  };
5215  
5216 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5217 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5218         { /* Generated from TF test vectors */
5219                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5220                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5221 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e
5222         },
5223  };
5224  
5225 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5226 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5227         { /* Generated from TF test vectors */
5228                 .key    = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5229                           "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5230 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d
5231         },
5232  };
5233  
5234 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5235 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5236         { /* Generated from TF test vectors */
5237                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5238                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5239 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e
5240         },
5241  };
5242  
5243 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5244 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5245         { /* Generated from TF test vectors */
5246                 .key    = "\x27\x18\x28\x18\x28\x45\x90\x45"
5247                           "\x23\x53\x60\x28\x74\x71\x35\x26"
5248 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d
5249  /*
5250   * AES test vectors.
5251   */
5252 -#define AES_ENC_TEST_VECTORS 4
5253 -#define AES_DEC_TEST_VECTORS 4
5254 -#define AES_CBC_ENC_TEST_VECTORS 5
5255 -#define AES_CBC_DEC_TEST_VECTORS 5
5256 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5257 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5258 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5259 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5260 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5261 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5262 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5263 -#define AES_LRW_ENC_TEST_VECTORS 8
5264 -#define AES_LRW_DEC_TEST_VECTORS 8
5265 -#define AES_XTS_ENC_TEST_VECTORS 5
5266 -#define AES_XTS_DEC_TEST_VECTORS 5
5267 -#define AES_CTR_ENC_TEST_VECTORS 5
5268 -#define AES_CTR_DEC_TEST_VECTORS 5
5269 -#define AES_OFB_ENC_TEST_VECTORS 1
5270 -#define AES_OFB_DEC_TEST_VECTORS 1
5271 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5272 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5273 -#define AES_GCM_ENC_TEST_VECTORS 9
5274 -#define AES_GCM_DEC_TEST_VECTORS 8
5275 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5276 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5277 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5278 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5279 -#define AES_CCM_ENC_TEST_VECTORS 8
5280 -#define AES_CCM_DEC_TEST_VECTORS 7
5281 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5282 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5283 -
5284 -static struct cipher_testvec aes_enc_tv_template[] = {
5285 +static const struct cipher_testvec aes_enc_tv_template[] = {
5286         { /* From FIPS-197 */
5287                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5288                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5289 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_
5290         },
5291  };
5292  
5293 -static struct cipher_testvec aes_dec_tv_template[] = {
5294 +static const struct cipher_testvec aes_dec_tv_template[] = {
5295         { /* From FIPS-197 */
5296                 .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
5297                           "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5298 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_
5299         },
5300  };
5301  
5302 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5303 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5304         { /* From RFC 3602 */
5305                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5306                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5307 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc
5308         },
5309  };
5310  
5311 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5312 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5313         { /* From RFC 3602 */
5314                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5315                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5316 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec
5317         },
5318  };
5319  
5320 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5321 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5322         { /* Input data from RFC 2410 Case 1 */
5323  #ifdef __LITTLE_ENDIAN
5324                 .key    = "\x08\x00"            /* rta length */
5325 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_
5326         },
5327  };
5328  
5329 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5330 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5331         {
5332  #ifdef __LITTLE_ENDIAN
5333                 .key    = "\x08\x00"            /* rta length */
5334 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5335         },
5336  };
5337  
5338 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5339 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5340         { /* RFC 3602 Case 1 */
5341  #ifdef __LITTLE_ENDIAN
5342                 .key    = "\x08\x00"            /* rta length */
5343 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes
5344         },
5345  };
5346  
5347 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5348 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5349         { /* Input data from RFC 2410 Case 1 */
5350  #ifdef __LITTLE_ENDIAN
5351                 .key    = "\x08\x00"            /* rta length */
5352 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb
5353         },
5354  };
5355  
5356 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5357 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5358         {
5359  #ifdef __LITTLE_ENDIAN
5360                 .key    = "\x08\x00"            /* rta length */
5361 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb
5362         },
5363  };
5364  
5365 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5366 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5367         { /* RFC 3602 Case 1 */
5368  #ifdef __LITTLE_ENDIAN
5369                 .key    = "\x08\x00"            /* rta length */
5370 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a
5371         },
5372  };
5373  
5374 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5375 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5376         { /* RFC 3602 Case 1 */
5377  #ifdef __LITTLE_ENDIAN
5378                 .key    = "\x08\x00"            /* rta length */
5379 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a
5380         },
5381  };
5382  
5383 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5384 -
5385 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5386 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5387         { /*Generated with cryptopp*/
5388  #ifdef __LITTLE_ENDIAN
5389                 .key    = "\x08\x00"            /* rta length */
5390 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des
5391         },
5392  };
5393  
5394 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC       1
5395 -
5396 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5397 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5398         { /*Generated with cryptopp*/
5399  #ifdef __LITTLE_ENDIAN
5400                 .key    = "\x08\x00"            /* rta length */
5401 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d
5402         },
5403  };
5404  
5405 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC       1
5406 -
5407 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5408 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5409         { /*Generated with cryptopp*/
5410  #ifdef __LITTLE_ENDIAN
5411                 .key    = "\x08\x00"            /* rta length */
5412 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d
5413         },
5414  };
5415  
5416 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC       1
5417 -
5418 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5419 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5420         { /*Generated with cryptopp*/
5421  #ifdef __LITTLE_ENDIAN
5422                 .key    = "\x08\x00"            /* rta length */
5423 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d
5424         },
5425  };
5426  
5427 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC       1
5428 -
5429 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5430 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5431         { /*Generated with cryptopp*/
5432  #ifdef __LITTLE_ENDIAN
5433                 .key    = "\x08\x00"            /* rta length */
5434 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d
5435         },
5436  };
5437  
5438 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC    1
5439 -
5440 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5441 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5442         { /*Generated with cryptopp*/
5443  #ifdef __LITTLE_ENDIAN
5444                 .key    = "\x08\x00"            /* rta length */
5445 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des
5446         },
5447  };
5448  
5449 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC  1
5450 -
5451 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5452 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5453         { /*Generated with cryptopp*/
5454  #ifdef __LITTLE_ENDIAN
5455                 .key    = "\x08\x00"            /* rta length */
5456 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d
5457         },
5458  };
5459  
5460 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC  1
5461 -
5462 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5463 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5464         { /*Generated with cryptopp*/
5465  #ifdef __LITTLE_ENDIAN
5466                 .key    = "\x08\x00"            /* rta length */
5467 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d
5468         },
5469  };
5470  
5471 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC  1
5472 -
5473 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5474 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5475         { /*Generated with cryptopp*/
5476  #ifdef __LITTLE_ENDIAN
5477                 .key    = "\x08\x00"            /* rta length */
5478 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d
5479         },
5480  };
5481  
5482 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC  1
5483 -
5484 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5485 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5486         { /*Generated with cryptopp*/
5487  #ifdef __LITTLE_ENDIAN
5488                 .key    = "\x08\x00"            /* rta length */
5489 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d
5490         },
5491  };
5492  
5493 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5494 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5495         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5496         { /* LRW-32-AES 1 */
5497                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5498 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc
5499         }
5500  };
5501  
5502 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5503 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5504         /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5505         /* same as enc vectors with input and result reversed */
5506         { /* LRW-32-AES 1 */
5507 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec
5508         }
5509  };
5510  
5511 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5512 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5513         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5514         { /* XTS-AES 1 */
5515                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5516 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc
5517         }
5518  };
5519  
5520 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5521 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5522         /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5523         { /* XTS-AES 1 */
5524                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
5525 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec
5526  };
5527  
5528  
5529 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5530 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5531         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5532                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5533                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5534 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc
5535         },
5536  };
5537  
5538 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5539 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5540         { /* From NIST Special Publication 800-38A, Appendix F.5 */
5541                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5542                           "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5543 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec
5544         },
5545  };
5546  
5547 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5548 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5549         { /* From RFC 3686 */
5550                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5551                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5552 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc
5553         },
5554  };
5555  
5556 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5557 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5558         { /* From RFC 3686 */
5559                 .key    = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5560                           "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5561 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc
5562         },
5563  };
5564  
5565 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5566 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5567          /* From NIST Special Publication 800-38A, Appendix F.5 */
5568         {
5569                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5570 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc
5571         }
5572  };
5573  
5574 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5575 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5576          /* From NIST Special Publication 800-38A, Appendix F.5 */
5577         {
5578                 .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5579 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec
5580         }
5581  };
5582  
5583 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5584 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5585         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5586                 .key    = zeroed_string,
5587                 .klen   = 16,
5588 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t
5589         }
5590  };
5591  
5592 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5593 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5594         { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5595                 .key    = zeroed_string,
5596                 .klen   = 32,
5597 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t
5598         }
5599  };
5600  
5601 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5602 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5603         { /* Generated using Crypto++ */
5604                 .key    = zeroed_string,
5605                 .klen   = 20,
5606 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41
5607         }
5608  };
5609  
5610 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5611 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5612         { /* Generated using Crypto++ */
5613                 .key    = zeroed_string,
5614                 .klen   = 20,
5615 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41
5616         }
5617  };
5618  
5619 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5620 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5621         { /* From draft-mcgrew-gcm-test-01 */
5622                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5623                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5624 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45
5625         }
5626  };
5627  
5628 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5629 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5630         { /* From draft-mcgrew-gcm-test-01 */
5631                 .key    = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5632                           "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5633 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45
5634         },
5635  };
5636  
5637 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5638 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5639         { /* From RFC 3610 */
5640                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5641                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5642 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t
5643         }
5644  };
5645  
5646 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5647 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5648         { /* From RFC 3610 */
5649                 .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5650                           "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5651 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t
5652   * These vectors are copied/generated from the ones for rfc4106 with
5653   * the key truncated by one byte..
5654   */
5655 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5656 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5657         { /* Generated using Crypto++ */
5658                 .key    = zeroed_string,
5659                 .klen   = 19,
5660 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43
5661         }
5662  };
5663  
5664 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]   = {
5665 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]     = {
5666         { /* Generated using Crypto++ */
5667                 .key    = zeroed_string,
5668                 .klen   = 19,
5669 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43
5670  /*
5671   * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5672   */
5673 -#define RFC7539_ENC_TEST_VECTORS 2
5674 -#define RFC7539_DEC_TEST_VECTORS 2
5675 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5676 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5677         {
5678                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5679                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5680 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t
5681         },
5682  };
5683  
5684 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5685 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5686         {
5687                 .key    = "\x80\x81\x82\x83\x84\x85\x86\x87"
5688                           "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5689 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t
5690  /*
5691   * draft-irtf-cfrg-chacha20-poly1305
5692   */
5693 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5694 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5695 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5696 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5697         {
5698                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5699                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5700 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en
5701         },
5702  };
5703  
5704 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5705 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5706         {
5707                 .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5708                           "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5709 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de
5710   * semiblock of the ciphertext from the test vector. For decryption, iv is
5711   * the first semiblock of the ciphertext.
5712   */
5713 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5714 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5715         {
5716                 .key    = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5717                           "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5718 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_
5719         },
5720  };
5721  
5722 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5723 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5724         {
5725                 .key    = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5726                           "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5727 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_
5728   *     http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5729   * Only AES-128 is supported at this time.
5730   */
5731 -#define ANSI_CPRNG_AES_TEST_VECTORS    6
5732 -
5733 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5734 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5735         {
5736                 .key    = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5737                           "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5738 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a
5739   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5740   * w/o personalization string, w/ and w/o additional input string).
5741   */
5742 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5743 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5744         {
5745                 .entropy = (unsigned char *)
5746                         "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5747 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25
5748         },
5749  };
5750  
5751 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5752 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5753         {
5754                 .entropy = (unsigned char *)
5755                         "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5756 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_
5757         },
5758  };
5759  
5760 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5761 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5762         {
5763                 .entropy = (unsigned char *)
5764                         "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5765 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5766   * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5767   * w/o personalization string, w/ and w/o additional input string).
5768   */
5769 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5770 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5771         {
5772                 .entropy = (unsigned char *)
5773                         "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5774 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha
5775         },
5776  };
5777  
5778 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5779 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5780         {
5781                 .entropy = (unsigned char *)
5782                         "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5783 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma
5784         },
5785  };
5786  
5787 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5788 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5789         {
5790                 .entropy = (unsigned char *)
5791                         "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5792 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr
5793         },
5794  };
5795  
5796 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5797 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5798         {
5799                 .entropy = (unsigned char *)
5800                         "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5801 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr
5802         },
5803  };
5804  
5805 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5806 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5807         {
5808                 .entropy = (unsigned char *)
5809                         "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5810 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr
5811  };
5812  
5813  /* Cast5 test vectors from RFC 2144 */
5814 -#define CAST5_ENC_TEST_VECTORS         4
5815 -#define CAST5_DEC_TEST_VECTORS         4
5816 -#define CAST5_CBC_ENC_TEST_VECTORS     1
5817 -#define CAST5_CBC_DEC_TEST_VECTORS     1
5818 -#define CAST5_CTR_ENC_TEST_VECTORS     2
5819 -#define CAST5_CTR_DEC_TEST_VECTORS     2
5820 -
5821 -static struct cipher_testvec cast5_enc_tv_template[] = {
5822 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5823         {
5824                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5825                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5826 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t
5827         },
5828  };
5829  
5830 -static struct cipher_testvec cast5_dec_tv_template[] = {
5831 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5832         {
5833                 .key    = "\x01\x23\x45\x67\x12\x34\x56\x78"
5834                           "\x23\x45\x67\x89\x34\x56\x78\x9a",
5835 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t
5836         },
5837  };
5838  
5839 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5840 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5841         { /* Generated from TF test vectors */
5842                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5843                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5844 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e
5845         },
5846  };
5847  
5848 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5849 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5850         { /* Generated from TF test vectors */
5851                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5852                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5853 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d
5854         },
5855  };
5856  
5857 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5858 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5859         { /* Generated from TF test vectors */
5860                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5861                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5862 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e
5863         },
5864  };
5865  
5866 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5867 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5868         { /* Generated from TF test vectors */
5869                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5870                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5871 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d
5872  /*
5873   * ARC4 test vectors from OpenSSL
5874   */
5875 -#define ARC4_ENC_TEST_VECTORS  7
5876 -#define ARC4_DEC_TEST_VECTORS  7
5877 -
5878 -static struct cipher_testvec arc4_enc_tv_template[] = {
5879 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5880         {
5881                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5882                 .klen   = 8,
5883 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv
5884         },
5885  };
5886  
5887 -static struct cipher_testvec arc4_dec_tv_template[] = {
5888 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5889         {
5890                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5891                 .klen   = 8,
5892 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv
5893  /*
5894   * TEA test vectors
5895   */
5896 -#define TEA_ENC_TEST_VECTORS   4
5897 -#define TEA_DEC_TEST_VECTORS   4
5898 -
5899 -static struct cipher_testvec tea_enc_tv_template[] = {
5900 +static const struct cipher_testvec tea_enc_tv_template[] = {
5901         {
5902                 .key    = zeroed_string,
5903                 .klen   = 16,
5904 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_
5905         }
5906  };
5907  
5908 -static struct cipher_testvec tea_dec_tv_template[] = {
5909 +static const struct cipher_testvec tea_dec_tv_template[] = {
5910         {
5911                 .key    = zeroed_string,
5912                 .klen   = 16,
5913 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_
5914  /*
5915   * XTEA test vectors
5916   */
5917 -#define XTEA_ENC_TEST_VECTORS  4
5918 -#define XTEA_DEC_TEST_VECTORS  4
5919 -
5920 -static struct cipher_testvec xtea_enc_tv_template[] = {
5921 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5922         {
5923                 .key    = zeroed_string,
5924                 .klen   = 16,
5925 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv
5926         }
5927  };
5928  
5929 -static struct cipher_testvec xtea_dec_tv_template[] = {
5930 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5931         {
5932                 .key    = zeroed_string,
5933                 .klen   = 16,
5934 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv
5935  /*
5936   * KHAZAD test vectors.
5937   */
5938 -#define KHAZAD_ENC_TEST_VECTORS 5
5939 -#define KHAZAD_DEC_TEST_VECTORS 5
5940 -
5941 -static struct cipher_testvec khazad_enc_tv_template[] = {
5942 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5943         {
5944                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5945                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5946 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_
5947         },
5948  };
5949  
5950 -static struct cipher_testvec khazad_dec_tv_template[] = {
5951 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5952         {
5953                 .key    = "\x80\x00\x00\x00\x00\x00\x00\x00"
5954                           "\x00\x00\x00\x00\x00\x00\x00\x00",
5955 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_
5956   * Anubis test vectors.
5957   */
5958  
5959 -#define ANUBIS_ENC_TEST_VECTORS                        5
5960 -#define ANUBIS_DEC_TEST_VECTORS                        5
5961 -#define ANUBIS_CBC_ENC_TEST_VECTORS            2
5962 -#define ANUBIS_CBC_DEC_TEST_VECTORS            2
5963 -
5964 -static struct cipher_testvec anubis_enc_tv_template[] = {
5965 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5966         {
5967                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5968                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5969 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_
5970         },
5971  };
5972  
5973 -static struct cipher_testvec anubis_dec_tv_template[] = {
5974 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5975         {
5976                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5977                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5978 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_
5979         },
5980  };
5981  
5982 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5983 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5984         {
5985                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5986                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5987 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_
5988         },
5989  };
5990  
5991 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5992 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5993         {
5994                 .key    = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5995                           "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5996 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_
5997  /*
5998   * XETA test vectors
5999   */
6000 -#define XETA_ENC_TEST_VECTORS  4
6001 -#define XETA_DEC_TEST_VECTORS  4
6002 -
6003 -static struct cipher_testvec xeta_enc_tv_template[] = {
6004 +static const struct cipher_testvec xeta_enc_tv_template[] = {
6005         {
6006                 .key    = zeroed_string,
6007                 .klen   = 16,
6008 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv
6009         }
6010  };
6011  
6012 -static struct cipher_testvec xeta_dec_tv_template[] = {
6013 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6014         {
6015                 .key    = zeroed_string,
6016                 .klen   = 16,
6017 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv
6018  /*
6019   * FCrypt test vectors
6020   */
6021 -#define FCRYPT_ENC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6022 -#define FCRYPT_DEC_TEST_VECTORS        ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6023 -
6024 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6025 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6026         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6027                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6028                 .klen   = 8,
6029 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc
6030         }
6031  };
6032  
6033 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6034 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6035         { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6036                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00",
6037                 .klen   = 8,
6038 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc
6039  /*
6040   * CAMELLIA test vectors.
6041   */
6042 -#define CAMELLIA_ENC_TEST_VECTORS 4
6043 -#define CAMELLIA_DEC_TEST_VECTORS 4
6044 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6045 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6046 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6047 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6048 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6049 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6050 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6051 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6052 -
6053 -static struct cipher_testvec camellia_enc_tv_template[] = {
6054 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6055         {
6056                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6057                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6058 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en
6059         },
6060  };
6061  
6062 -static struct cipher_testvec camellia_dec_tv_template[] = {
6063 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6064         {
6065                 .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6066                           "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6067 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de
6068         },
6069  };
6070  
6071 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6072 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6073         {
6074                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6075                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6076 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb
6077         },
6078  };
6079  
6080 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6081 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6082         {
6083                 .key    = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6084                           "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6085 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb
6086         },
6087  };
6088  
6089 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6090 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6091         { /* Generated with Crypto++ */
6092                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6093                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6094 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct
6095         },
6096  };
6097  
6098 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6099 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6100         { /* Generated with Crypto++ */
6101                 .key    = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6102                           "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6103 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct
6104         },
6105  };
6106  
6107 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6108 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6109         /* Generated from AES-LRW test vectors */
6110         {
6111                 .key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6112 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr
6113         },
6114  };
6115  
6116 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6117 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6118         /* Generated from AES-LRW test vectors */
6119         /* same as enc vectors with input and result reversed */
6120         {
6121 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr
6122         },
6123  };
6124  
6125 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6126 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6127         /* Generated from AES-XTS test vectors */
6128         {
6129                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6130 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt
6131         },
6132  };
6133  
6134 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6135 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6136         /* Generated from AES-XTS test vectors */
6137         /* same as enc vectors with input and result reversed */
6138         {
6139 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt
6140  /*
6141   * SEED test vectors
6142   */
6143 -#define SEED_ENC_TEST_VECTORS  4
6144 -#define SEED_DEC_TEST_VECTORS  4
6145 -
6146 -static struct cipher_testvec seed_enc_tv_template[] = {
6147 +static const struct cipher_testvec seed_enc_tv_template[] = {
6148         {
6149                 .key    = zeroed_string,
6150                 .klen   = 16,
6151 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv
6152         }
6153  };
6154  
6155 -static struct cipher_testvec seed_dec_tv_template[] = {
6156 +static const struct cipher_testvec seed_dec_tv_template[] = {
6157         {
6158                 .key    = zeroed_string,
6159                 .klen   = 16,
6160 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv
6161         }
6162  };
6163  
6164 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6165 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6166 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6167         /*
6168         * Testvectors from verified.test-vectors submitted to ECRYPT.
6169         * They are truncated to size 39, 64, 111, 129 to test a variety
6170 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str
6171         },
6172  };
6173  
6174 -#define CHACHA20_ENC_TEST_VECTORS 4
6175 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6176 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6177         { /* RFC7539 A.2. Test Vector #1 */
6178                 .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
6179                           "\x00\x00\x00\x00\x00\x00\x00\x00"
6180 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en
6181  /*
6182   * CTS (Cipher Text Stealing) mode tests
6183   */
6184 -#define CTS_MODE_ENC_TEST_VECTORS 6
6185 -#define CTS_MODE_DEC_TEST_VECTORS 6
6186 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6187 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6188         { /* from rfc3962 */
6189                 .klen   = 16,
6190                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6191 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en
6192         }
6193  };
6194  
6195 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6196 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6197         { /* from rfc3962 */
6198                 .klen   = 16,
6199                 .key    = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6200 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6201   * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6202   */
6203  
6204 -#define DEFLATE_COMP_TEST_VECTORS 2
6205 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6206 -
6207 -static struct comp_testvec deflate_comp_tv_template[] = {
6208 +static const struct comp_testvec deflate_comp_tv_template[] = {
6209         {
6210                 .inlen  = 70,
6211                 .outlen = 38,
6212 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_
6213         },
6214  };
6215  
6216 -static struct comp_testvec deflate_decomp_tv_template[] = {
6217 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6218         {
6219                 .inlen  = 122,
6220                 .outlen = 191,
6221 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom
6222  /*
6223   * LZO test vectors (null-terminated strings).
6224   */
6225 -#define LZO_COMP_TEST_VECTORS 2
6226 -#define LZO_DECOMP_TEST_VECTORS 2
6227 -
6228 -static struct comp_testvec lzo_comp_tv_template[] = {
6229 +static const struct comp_testvec lzo_comp_tv_template[] = {
6230         {
6231                 .inlen  = 70,
6232                 .outlen = 57,
6233 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t
6234         },
6235  };
6236  
6237 -static struct comp_testvec lzo_decomp_tv_template[] = {
6238 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6239         {
6240                 .inlen  = 133,
6241                 .outlen = 159,
6242 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv
6243   */
6244  #define MICHAEL_MIC_TEST_VECTORS 6
6245  
6246 -static struct hash_testvec michael_mic_tv_template[] = {
6247 +static const struct hash_testvec michael_mic_tv_template[] = {
6248         {
6249                 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6250                 .ksize = 8,
6251 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t
6252  /*
6253   * CRC32 test vectors
6254   */
6255 -#define CRC32_TEST_VECTORS 14
6256 -
6257 -static struct hash_testvec crc32_tv_template[] = {
6258 +static const struct hash_testvec crc32_tv_template[] = {
6259         {
6260                 .key = "\x87\xa9\xcb\xed",
6261                 .ksize = 4,
6262 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp
6263  /*
6264   * CRC32C test vectors
6265   */
6266 -#define CRC32C_TEST_VECTORS 15
6267 -
6268 -static struct hash_testvec crc32c_tv_template[] = {
6269 +static const struct hash_testvec crc32c_tv_template[] = {
6270         {
6271                 .psize = 0,
6272                 .digest = "\x00\x00\x00\x00",
6273 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem
6274  /*
5275  * Blackfin CRC test vectors
6276   */
6277 -#define BFIN_CRC_TEST_VECTORS 6
6278 -
6279 -static struct hash_testvec bfin_crc_tv_template[] = {
6280 +static const struct hash_testvec bfin_crc_tv_template[] = {
6281         {
6282                 .psize = 0,
6283                 .digest = "\x00\x00\x00\x00",
6284 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t
6285  
6286  };
6287  
6288 -#define LZ4_COMP_TEST_VECTORS 1
6289 -#define LZ4_DECOMP_TEST_VECTORS 1
6290 -
6291  static struct comp_testvec lz4_comp_tv_template[] = {
6292         {
6293                 .inlen  = 70,
6294 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv
6295         },
6296  };
6297  
6298 -#define LZ4HC_COMP_TEST_VECTORS 1
6299 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6300 -
6301  static struct comp_testvec lz4hc_comp_tv_template[] = {
6302         {
6303                 .inlen  = 70,
6304 --- /dev/null
6305 +++ b/crypto/tls.c
6306 @@ -0,0 +1,607 @@
6307 +/*
6308 + * Copyright 2013 Freescale Semiconductor, Inc.
6309 + * Copyright 2017 NXP Semiconductor, Inc.
6310 + *
6311 + * This program is free software; you can redistribute it and/or modify it
6312 + * under the terms of the GNU General Public License as published by the Free
6313 + * Software Foundation; either version 2 of the License, or (at your option)
6314 + * any later version.
6315 + *
6316 + */
6317 +
6318 +#include <crypto/internal/aead.h>
6319 +#include <crypto/internal/hash.h>
6320 +#include <crypto/internal/skcipher.h>
6321 +#include <crypto/authenc.h>
6322 +#include <crypto/null.h>
6323 +#include <crypto/scatterwalk.h>
6324 +#include <linux/err.h>
6325 +#include <linux/init.h>
6326 +#include <linux/module.h>
6327 +#include <linux/rtnetlink.h>
6328 +
6329 +struct tls_instance_ctx {
6330 +       struct crypto_ahash_spawn auth;
6331 +       struct crypto_skcipher_spawn enc;
6332 +};
6333 +
6334 +struct crypto_tls_ctx {
6335 +       unsigned int reqoff;            /* offset of the sub-request in the req ctx tail */
6336 +       struct crypto_ahash *auth;      /* authentication (hash) transform */
6337 +       struct crypto_skcipher *enc;    /* bulk cipher transform */
6338 +       struct crypto_skcipher *null;   /* null cipher, used to copy data */
6339 +};
6340 +
6341 +struct tls_request_ctx {
6342 +       /*
6343 +        * cryptlen holds the payload length in the case of encryption or
6344 +        * payload_len + icv_len + padding_len in the case of decryption
6345 +        */
6346 +       unsigned int cryptlen;
6347 +       /* working space for partial results */
6348 +       struct scatterlist tmp[2];
6349 +       struct scatterlist cipher[2];
6350 +       struct scatterlist dst[2];
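+       /* room for the ICV and the sub-request (placed at ctx->reqoff) */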
6351 +       char tail[];
6352 +};
6353 +
6354 +struct async_op {
6355 +       struct completion completion;
6356 +       int err;
6357 +};
6358 +
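+/* Completion callback: record the status and wake up the synchronous waiter */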
6359 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6360 +{
6361 +       struct async_op *areq = req->data;
6362 +
6363 +       if (err == -EINPROGRESS)
6364 +               return;
6365 +
6366 +       areq->err = err;
6367 +       complete(&areq->completion);
6368 +}
6369 +
6370 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6371 +                            unsigned int keylen)
6372 +{
6373 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6374 +       struct crypto_ahash *auth = ctx->auth;
6375 +       struct crypto_skcipher *enc = ctx->enc;
6376 +       struct crypto_authenc_keys keys;
6377 +       int err = -EINVAL;
6378 +
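+       /* key is an authenc blob: an rtattr carrying enckeylen, then authkey, enckey */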
6379 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6380 +               goto badkey;
6381 +
6382 +       crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6383 +       crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6384 +                                   CRYPTO_TFM_REQ_MASK);
6385 +       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6386 +       crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6387 +                                      CRYPTO_TFM_RES_MASK);
6388 +
6389 +       if (err)
6390 +               goto out;
6391 +
6392 +       crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6393 +       crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6394 +                                        CRYPTO_TFM_REQ_MASK);
6395 +       err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6396 +       crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6397 +                                      CRYPTO_TFM_RES_MASK);
6398 +
6399 +out:
6400 +       return err;
6401 +
6402 +badkey:
6403 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6404 +       goto out;
6405 +}
6406 +
6407 +/**
6408 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6409 + * @hash:      (output) buffer to save the digest into
6410 + * @src:       (input) scatterlist with the assoc and payload data
6411 + * @srclen:    (input) size of the source buffer (assoclen + cryptlen)
6412 + * @req:       (input) aead request
6413 + **/
6414 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6415 +                            unsigned int srclen, struct aead_request *req)
6416 +{
6417 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6418 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6419 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6420 +       struct async_op ahash_op;
6421 +       struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6422 +       unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6423 +       int err = -EBADMSG;
6424 +
6425 +       /* Bail out if the request assoc len is 0 */
6426 +       if (!req->assoclen)
6427 +               return err;
6428 +
6429 +       init_completion(&ahash_op.completion);
6430 +
6431 +       /* the hash transform to be executed comes from the original request */
6432 +       ahash_request_set_tfm(ahreq, ctx->auth);
6433 +       /* prepare the hash request with input data and result pointer */
6434 +       ahash_request_set_crypt(ahreq, src, hash, srclen);
6435 +       /* set the notifier for when the async hash function returns */
6436 +       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6437 +                                  tls_async_op_done, &ahash_op);
6438 +
6439 +       /* Calculate the digest on the given data. The result is put in hash */
6440 +       err = crypto_ahash_digest(ahreq);
6441 +       if (err == -EINPROGRESS) {
6442 +               err = wait_for_completion_interruptible(&ahash_op.completion);
6443 +               if (!err)
6444 +                       err = ahash_op.err;
6445 +       }
6446 +
6447 +       return err;
6448 +}
6449 +
6450 +/**
6451 + * crypto_tls_gen_padicv - Calculate and pad the HMAC digest for a TLS record
6452 + * @hash:      (output) buffer to save the digest and padding into
6453 + * @phashlen:  (output) the size of digest + padding
6454 + * @req:       (input) aead request
6455 + */
6456 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6457 +                                struct aead_request *req)
6458 +{
6459 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6460 +       unsigned int hash_size = crypto_aead_authsize(tls);
6461 +       unsigned int block_size = crypto_aead_blocksize(tls);
6462 +       unsigned int srclen = req->cryptlen + hash_size;
6463 +       unsigned int icvlen = req->cryptlen + req->assoclen;
6464 +       unsigned int padlen;
6465 +       int err;
6466 +
6467 +       err = crypto_tls_genicv(hash, req->src, icvlen, req);
6468 +       if (err)
6469 +               goto out;
6470 +
6471 +       /* add padding after digest */
6472 +       padlen = block_size - (srclen % block_size);
6473 +       memset(hash + hash_size, padlen - 1, padlen);
6474 +
6475 +       *phashlen = hash_size + padlen;
6476 +out:
6477 +       return err;
6478 +}
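
A worked example of the arithmetic above, assuming HMAC-SHA1 (hash_size = 20) and AES-CBC (block_size = 16) with a 6-byte payload: srclen = 6 + 20 = 26, padlen = 16 - (26 % 16) = 6, so six padding bytes of value 0x05 are written after the digest and *phashlen = 26. The total the caller will encrypt, cryptlen + *phashlen = 32, is then an exact multiple of the block size.
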
6479 +
6480 +static int crypto_tls_copy_data(struct aead_request *req,
6481 +                               struct scatterlist *src,
6482 +                               struct scatterlist *dst,
6483 +                               unsigned int len)
6484 +{
6485 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6486 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6487 +       SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6488 +
6489 +       skcipher_request_set_tfm(skreq, ctx->null);
6490 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6491 +                                     NULL, NULL);
6492 +       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6493 +
6494 +       return crypto_skcipher_encrypt(skreq);
6495 +}
6496 +
6497 +static int crypto_tls_encrypt(struct aead_request *req)
6498 +{
6499 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6500 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6501 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6502 +       struct skcipher_request *skreq;
6503 +       struct scatterlist *cipher = treq_ctx->cipher;
6504 +       struct scatterlist *tmp = treq_ctx->tmp;
6505 +       struct scatterlist *sg, *src, *dst;
6506 +       unsigned int cryptlen, phashlen;
6507 +       u8 *hash = treq_ctx->tail;
6508 +       int err;
6509 +
6510 +       /*
6511 +        * The hash result is saved at the beginning of the tls request ctx
6512 +        * and is aligned as required by the hash transform. Enough space was
6513 +        * allocated in crypto_tls_init_tfm to accommodate the difference. The
6514 +        * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6515 +        * the result is not overwritten by the second (cipher) request.
6516 +        */
6517 +       hash = (u8 *)ALIGN((unsigned long)hash +
6518 +                          crypto_ahash_alignmask(ctx->auth),
6519 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6520 +
6521 +       /*
6522 +        * STEP 1: create ICV together with necessary padding
6523 +        */
6524 +       err = crypto_tls_gen_padicv(hash, &phashlen, req);
6525 +       if (err)
6526 +               return err;
6527 +
6528 +       /*
6529 +        * STEP 2: Hash and padding are combined with the payload
6530 +        * depending on the form in which it arrives. Scatter tables must have at least
6531 +        * one page of data before chaining with another table and can't have
6532 +        * an empty data page. The following code addresses these requirements.
6533 +        *
6534 +        * If the payload is empty, only the hash is encrypted, otherwise the
6535 +        * payload scatterlist is merged with the hash. A special merging case
6536 +        * is when the payload has only one page of data. In that case the
6537 +        * payload page is moved to another scatterlist and prepared there for
6538 +        * encryption.
6539 +        */
6540 +       if (req->cryptlen) {
6541 +               src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6542 +
6543 +               sg_init_table(cipher, 2);
6544 +               sg_set_buf(cipher + 1, hash, phashlen);
6545 +
6546 +               if (sg_is_last(src)) {
6547 +                       sg_set_page(cipher, sg_page(src), req->cryptlen,
6548 +                                   src->offset);
6549 +                       src = cipher;
6550 +               } else {
6551 +                       unsigned int rem_len = req->cryptlen;
6552 +
6553 +                       for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6554 +                               rem_len -= min(rem_len, sg->length);
6555 +
6556 +                       sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6557 +                       sg_chain(sg, 1, cipher);
6558 +               }
6559 +       } else {
6560 +               sg_init_one(cipher, hash, phashlen);
6561 +               src = cipher;
6562 +       }
6563 +
6564 +       /*
6565 +        * If src != dst, copy the associated data from source to destination.
6566 +        * In both cases, fast-forward past the associated data in the dest.
6567 +        */
6568 +       if (req->src != req->dst) {
6569 +               err = crypto_tls_copy_data(req, req->src, req->dst,
6570 +                                          req->assoclen);
6571 +               if (err)
6572 +                       return err;
6573 +       }
6574 +       dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6575 +
6576 +       /*
6577 +        * STEP 3: encrypt the frame and return the result
6578 +        */
6579 +       cryptlen = req->cryptlen + phashlen;
6580 +
6581 +       /*
6582 +        * The hash and the cipher are applied at different times and their
6583 +        * requests can use the same memory space without interference
6584 +        */
6585 +       skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6586 +       skcipher_request_set_tfm(skreq, ctx->enc);
6587 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6588 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6589 +                                     req->base.complete, req->base.data);
6590 +       /*
6591 +        * Apply the cipher transform. The result will be in req->dst when the
6592 +        * asynchronous call terminates.
6593 +        */
6594 +       err = crypto_skcipher_encrypt(skreq);
6595 +
6596 +       return err;
6597 +}
6598 +
6599 +static int crypto_tls_decrypt(struct aead_request *req)
6600 +{
6601 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
6602 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6603 +       struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6604 +       unsigned int cryptlen = req->cryptlen;
6605 +       unsigned int hash_size = crypto_aead_authsize(tls);
6606 +       unsigned int block_size = crypto_aead_blocksize(tls);
6607 +       struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6608 +       struct scatterlist *tmp = treq_ctx->tmp;
6609 +       struct scatterlist *src, *dst;
6610 +
6611 +       u8 padding[255]; /* padding can be 0-255 bytes */
6612 +       u8 pad_size;
6613 +       u16 *len_field;
6614 +       u8 *ihash, *hash = treq_ctx->tail;
6615 +
6616 +       int paderr = 0;
6617 +       int err = -EINVAL;
6618 +       int i;
6619 +       struct async_op ciph_op;
6620 +
6621 +       /*
6622 +        * Rule out bad packets. The input packet length must be at least one
6623 +        * byte more than the hash_size
6624 +        */
6625 +       if (cryptlen <= hash_size || cryptlen % block_size)
6626 +               goto out;
6627 +
6628 +       /*
6629 +        * Step 1 - Decrypt the source. Fast-forward past the associated data
6630 +        * to the encrypted data. The result will be overwritten in place so
6631 +        * that the decrypted data will be adjacent to the associated data. The
6632 +        * last step (computing the hash) will have its input data already
6633 +        * prepared and ready to be accessed at req->src.
6634 +        */
6635 +       src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6636 +       dst = src;
6637 +
6638 +       init_completion(&ciph_op.completion);
6639 +       skcipher_request_set_tfm(skreq, ctx->enc);
6640 +       skcipher_request_set_callback(skreq, aead_request_flags(req),
6641 +                                     tls_async_op_done, &ciph_op);
6642 +       skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6643 +       err = crypto_skcipher_decrypt(skreq);
6644 +       if (err == -EINPROGRESS) {
6645 +               err = wait_for_completion_interruptible(&ciph_op.completion);
6646 +               if (!err)
6647 +                       err = ciph_op.err;
6648 +       }
6649 +       if (err)
6650 +               goto out;
6651 +
6652 +       /*
6653 +        * Step 2 - Verify padding
6654 +        * Retrieve the last byte of the payload; this is the padding size.
6655 +        */
6656 +       cryptlen -= 1;
6657 +       scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6658 +
6659 +       /* RFC recommendation for an invalid padding size: proceed with no padding and flag the record as bad */
6660 +       if (cryptlen < pad_size + hash_size) {
6661 +               pad_size = 0;
6662 +               paderr = -EBADMSG;
6663 +       }
6664 +       cryptlen -= pad_size;
6665 +       scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6666 +
6667 +       /* Every padding byte must be equal to pad_size. We verify them all */
6668 +       for (i = 0; i < pad_size; i++)
6669 +               if (padding[i] != pad_size)
6670 +                       paderr = -EBADMSG;
6671 +
6672 +       /*
6673 +        * Step 3 - Verify hash
6674 +        * Align the digest result as required by the hash transform. Enough
6675 +        * space was allocated in crypto_tls_init_tfm
6676 +        * space was allocated in crypto_tls_init_tfm.
6677 +       hash = (u8 *)ALIGN((unsigned long)hash +
6678 +                          crypto_ahash_alignmask(ctx->auth),
6679 +                          crypto_ahash_alignmask(ctx->auth) + 1);
6680 +       /*
6681 +        * Two bytes at the end of the associated data make the length field.
6682 +        * It must be updated with the length of the cleartext message before
6683 +        * the hash is calculated.
6684 +        */
6685 +       len_field = sg_virt(req->src) + req->assoclen - 2;
6686 +       cryptlen -= hash_size;
6687 +       *len_field = htons(cryptlen);
6688 +
6689 +       /* This is the hash from the decrypted packet. Save it for later */
6690 +       ihash = hash + hash_size;
6691 +       scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6692 +
6693 +       /* Now compute and compare our ICV with the one from the packet */
6694 +       err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6695 +       if (!err)
6696 +               err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6697 +
6698 +       if (req->src != req->dst) {
6699 +               err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6700 +                                          req->assoclen);
6701 +               if (err)
6702 +                       goto out;
6703 +       }
6704 +
6705 +       /* return the first found error */
6706 +       if (paderr)
6707 +               err = paderr;
6708 +
6709 +out:
6710 +       aead_request_complete(req, err);
6711 +       return err;
6712 +}
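
For reference, the record layout that crypto_tls_encrypt() and crypto_tls_decrypt() above assume, sketched (the associated data is the record header, whose last two bytes hold the length field):

	| header (assoclen) | payload | ICV (hash_size) | padding | pad_size |
	                    |<------- req->cryptlen (decryption) --------->|
	                    |<- req->cryptlen (encryption) ->|

On encryption the ICV, the padding bytes, and the trailing pad-size byte are generated and appended by the code above; on decryption they are part of req->cryptlen and are stripped and verified.
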
6713 +
6714 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6715 +{
6716 +       struct aead_instance *inst = aead_alg_instance(tfm);
6717 +       struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6718 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6719 +       struct crypto_ahash *auth;
6720 +       struct crypto_skcipher *enc;
6721 +       struct crypto_skcipher *null;
6722 +       int err;
6723 +
6724 +       auth = crypto_spawn_ahash(&ictx->auth);
6725 +       if (IS_ERR(auth))
6726 +               return PTR_ERR(auth);
6727 +
6728 +       enc = crypto_spawn_skcipher(&ictx->enc);
6729 +       err = PTR_ERR(enc);
6730 +       if (IS_ERR(enc))
6731 +               goto err_free_ahash;
6732 +
6733 +       null = crypto_get_default_null_skcipher2();
6734 +       err = PTR_ERR(null);
6735 +       if (IS_ERR(null))
6736 +               goto err_free_skcipher;
6737 +
6738 +       ctx->auth = auth;
6739 +       ctx->enc = enc;
6740 +       ctx->null = null;
6741 +
6742 +       /*
6743 +        * Allow enough space for two digests. The two digests will be compared
6744 +        * during the decryption phase. One will come from the decrypted packet
6745 +        * and the other will be calculated. For encryption, one digest is
6746 +        * padded (up to a cipher blocksize) and chained with the payload.
6747 +        */
6748 +       ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6749 +                           crypto_ahash_alignmask(auth),
6750 +                           crypto_ahash_alignmask(auth) + 1) +
6751 +                           max(crypto_ahash_digestsize(auth),
6752 +                               crypto_skcipher_blocksize(enc));
6753 +
6754 +       crypto_aead_set_reqsize(tfm,
6755 +                               sizeof(struct tls_request_ctx) +
6756 +                               ctx->reqoff +
6757 +                               max_t(unsigned int,
6758 +                                     crypto_ahash_reqsize(auth) +
6759 +                                     sizeof(struct ahash_request),
6760 +                                     crypto_skcipher_reqsize(enc) +
6761 +                                     sizeof(struct skcipher_request)));
6762 +
6763 +       return 0;
6764 +
6765 +err_free_skcipher:
6766 +       crypto_free_skcipher(enc);
6767 +err_free_ahash:
6768 +       crypto_free_ahash(auth);
6769 +       return err;
6770 +}
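
A sketch of the per-request memory laid out above (the alignment padding depends on the hash alignmask, so sizes are illustrative):

	treq_ctx->tail:
	[align pad | digest #1 (computed) | digest #2 from packet / ICV padding]
	|<--------------------------- ctx->reqoff --------------------------->|
	[ahash_request or skcipher_request + sub-request context]

The hash and cipher sub-requests are never outstanding at the same time, so a single region starting at treq_ctx->tail + ctx->reqoff safely serves both.
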
6771 +
6772 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6773 +{
6774 +       struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6775 +
6776 +       crypto_free_ahash(ctx->auth);
6777 +       crypto_free_skcipher(ctx->enc);
6778 +       crypto_put_default_null_skcipher2();
6779 +}
6780 +
6781 +static void crypto_tls_free(struct aead_instance *inst)
6782 +{
6783 +       struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6784 +
6785 +       crypto_drop_skcipher(&ctx->enc);
6786 +       crypto_drop_ahash(&ctx->auth);
6787 +       kfree(inst);
6788 +}
6789 +
6790 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6791 +{
6792 +       struct crypto_attr_type *algt;
6793 +       struct aead_instance *inst;
6794 +       struct hash_alg_common *auth;
6795 +       struct crypto_alg *auth_base;
6796 +       struct skcipher_alg *enc;
6797 +       struct tls_instance_ctx *ctx;
6798 +       const char *enc_name;
6799 +       int err;
6800 +
6801 +       algt = crypto_get_attr_type(tb);
6802 +       if (IS_ERR(algt))
6803 +               return PTR_ERR(algt);
6804 +
6805 +       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6806 +               return -EINVAL;
6807 +
6808 +       auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6809 +                             CRYPTO_ALG_TYPE_AHASH_MASK |
6810 +                             crypto_requires_sync(algt->type, algt->mask));
6811 +       if (IS_ERR(auth))
6812 +               return PTR_ERR(auth);
6813 +
6814 +       auth_base = &auth->base;
6815 +
6816 +       enc_name = crypto_attr_alg_name(tb[2]);
6817 +       err = PTR_ERR(enc_name);
6818 +       if (IS_ERR(enc_name))
6819 +               goto out_put_auth;
6820 +
6821 +       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6822 +       err = -ENOMEM;
6823 +       if (!inst)
6824 +               goto out_put_auth;
6825 +
6826 +       ctx = aead_instance_ctx(inst);
6827 +
6828 +       err = crypto_init_ahash_spawn(&ctx->auth, auth,
6829 +                                     aead_crypto_instance(inst));
6830 +       if (err)
6831 +               goto err_free_inst;
6832 +
6833 +       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6834 +       err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6835 +                                  crypto_requires_sync(algt->type,
6836 +                                                       algt->mask));
6837 +       if (err)
6838 +               goto err_drop_auth;
6839 +
6840 +       enc = crypto_spawn_skcipher_alg(&ctx->enc);
6841 +
6842 +       err = -ENAMETOOLONG;
6843 +       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6844 +                    "tls10(%s,%s)", auth_base->cra_name,
6845 +                    enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6846 +               goto err_drop_enc;
6847 +
6848 +       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6849 +                    "tls10(%s,%s)", auth_base->cra_driver_name,
6850 +                    enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6851 +               goto err_drop_enc;
6852 +
6853 +       inst->alg.base.cra_flags = (auth_base->cra_flags |
6854 +                                       enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6855 +       inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6856 +                                       auth_base->cra_priority;
6857 +       inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6858 +       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6859 +                                       enc->base.cra_alignmask;
6860 +       inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6861 +
6862 +       inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6863 +       inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6864 +       inst->alg.maxauthsize = auth->digestsize;
6865 +
6866 +       inst->alg.init = crypto_tls_init_tfm;
6867 +       inst->alg.exit = crypto_tls_exit_tfm;
6868 +
6869 +       inst->alg.setkey = crypto_tls_setkey;
6870 +       inst->alg.encrypt = crypto_tls_encrypt;
6871 +       inst->alg.decrypt = crypto_tls_decrypt;
6872 +
6873 +       inst->free = crypto_tls_free;
6874 +
6875 +       err = aead_register_instance(tmpl, inst);
6876 +       if (err)
6877 +               goto err_drop_enc;
6878 +
6879 +out:
6880 +       crypto_mod_put(auth_base);
6881 +       return err;
6882 +
6883 +err_drop_enc:
6884 +       crypto_drop_skcipher(&ctx->enc);
6885 +err_drop_auth:
6886 +       crypto_drop_ahash(&ctx->auth);
6887 +err_free_inst:
6888 +       kfree(inst);
6889 +out_put_auth:
6890 +       goto out;
6891 +}
6892 +
6893 +static struct crypto_template crypto_tls_tmpl = {
6894 +       .name = "tls10",
6895 +       .create = crypto_tls_create,
6896 +       .module = THIS_MODULE,
6897 +};
6898 +
6899 +static int __init crypto_tls_module_init(void)
6900 +{
6901 +       return crypto_register_template(&crypto_tls_tmpl);
6902 +}
6903 +
6904 +static void __exit crypto_tls_module_exit(void)
6905 +{
6906 +       crypto_unregister_template(&crypto_tls_tmpl);
6907 +}
6908 +
6909 +module_init(crypto_tls_module_init);
6910 +module_exit(crypto_tls_module_exit);
6911 +
6912 +MODULE_LICENSE("GPL");
6913 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
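
A minimal usage sketch for the template registered above, assuming generic hmac(sha1) and cbc(aes) implementations are available; key/keylen, sg, payload_len and iv are caller-prepared and error handling is elided:

	#include <crypto/aead.h>
	#include <linux/scatterlist.h>

	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	err = crypto_aead_setkey(tfm, key, keylen);	/* authenc-format blob */

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, 13);			/* TLS 1.0 record header */
	aead_request_set_crypt(req, sg, sg, payload_len, iv);
	err = crypto_aead_encrypt(req);			/* appends ICV + padding */
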
6914 --- a/drivers/crypto/caam/Kconfig
6915 +++ b/drivers/crypto/caam/Kconfig
6916 @@ -1,6 +1,11 @@
6917 +config CRYPTO_DEV_FSL_CAAM_COMMON
6918 +       tristate
6919 +
6920  config CRYPTO_DEV_FSL_CAAM
6921 -       tristate "Freescale CAAM-Multicore driver backend"
6922 +       tristate "Freescale CAAM-Multicore platform driver backend"
6923         depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6924 +       select CRYPTO_DEV_FSL_CAAM_COMMON
6925 +       select SOC_BUS
6926         help
6927           Enables the driver module for Freescale's Cryptographic Accelerator
6928           and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6929 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6930           To compile this driver as a module, choose M here: the module
6931           will be called caam.
6932  
6933 +if CRYPTO_DEV_FSL_CAAM
6934 +
6935 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6936 +       bool "Enable debug output in CAAM driver"
6937 +       help
6938 +         Selecting this will enable printing of various debug
6939 +         information in the CAAM driver.
6940 +
6941  config CRYPTO_DEV_FSL_CAAM_JR
6942         tristate "Freescale CAAM Job Ring driver backend"
6943 -       depends on CRYPTO_DEV_FSL_CAAM
6944         default y
6945         help
6946           Enables the driver module for Job Rings which are part of
6947 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6948           To compile this driver as a module, choose M here: the module
6949           will be called caam_jr.
6950  
6951 +if CRYPTO_DEV_FSL_CAAM_JR
6952 +
6953  config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6954         int "Job Ring size"
6955 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6956         range 2 9
6957         default "9"
6958         help
6959 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6960  
6961  config CRYPTO_DEV_FSL_CAAM_INTC
6962         bool "Job Ring interrupt coalescing"
6963 -       depends on CRYPTO_DEV_FSL_CAAM_JR
6964         help
6965           Enable the Job Ring's interrupt coalescing feature.
6966  
6967 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6968  
6969  config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6970         tristate "Register algorithm implementations with the Crypto API"
6971 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6972         default y
6973         select CRYPTO_AEAD
6974         select CRYPTO_AUTHENC
6975 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6976           To compile this as a module, choose M here: the module
6977           will be called caamalg.
6978  
6979 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6980 +       tristate "Queue Interface as Crypto API backend"
6981 +       depends on FSL_SDK_DPA && NET
6982 +       default y
6983 +       select CRYPTO_AUTHENC
6984 +       select CRYPTO_BLKCIPHER
6985 +       help
6986 +         Selecting this will use CAAM Queue Interface (QI) for sending
6987 +         & receiving crypto jobs to/from CAAM. This gives better performance
6988 +         than the job ring interface when the number of cores is greater than
6989 +         the number of job rings assigned to the kernel. The number of portals
6990 +         assigned to the kernel should also be greater than the number of
6991 +         job rings.
6992 +
6993 +         To compile this as a module, choose M here: the module
6994 +         will be called caamalg_qi.
6995 +
6996  config CRYPTO_DEV_FSL_CAAM_AHASH_API
6997         tristate "Register hash algorithm implementations with Crypto API"
6998 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6999         default y
7000         select CRYPTO_HASH
7001         help
7002 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7003  
7004  config CRYPTO_DEV_FSL_CAAM_PKC_API
7005          tristate "Register public key cryptography implementations with Crypto API"
7006 -        depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7007          default y
7008          select CRYPTO_RSA
7009          help
7010 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7011  
7012  config CRYPTO_DEV_FSL_CAAM_RNG_API
7013         tristate "Register caam device for hwrng API"
7014 -       depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7015         default y
7016         select CRYPTO_RNG
7017         select HW_RANDOM
7018 @@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7019           To compile this as a module, choose M here: the module
7020           will be called caamrng.
7021  
7022 -config CRYPTO_DEV_FSL_CAAM_IMX
7023 -       def_bool SOC_IMX6 || SOC_IMX7D
7024 -       depends on CRYPTO_DEV_FSL_CAAM
7025 +endif # CRYPTO_DEV_FSL_CAAM_JR
7026  
7027 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7028 -       bool "Enable debug output in CAAM driver"
7029 -       depends on CRYPTO_DEV_FSL_CAAM
7030 -       help
7031 -         Selecting this will enable printing of various debug
7032 -         information in the CAAM driver.
7033 +endif # CRYPTO_DEV_FSL_CAAM
7034 +
7035 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7036 +       tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7037 +       depends on FSL_MC_DPIO
7038 +       select CRYPTO_DEV_FSL_CAAM_COMMON
7039 +       select CRYPTO_BLKCIPHER
7040 +       select CRYPTO_AUTHENC
7041 +       select CRYPTO_AEAD
7042 +       select CRYPTO_HASH
7043 +       ---help---
7044 +         CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7045 +         It handles DPSECI DPAA2 objects that sit on the Management Complex
7046 +         (MC) fsl-mc bus.
7047 +
7048 +         To compile this as a module, choose M here: the module
7049 +         will be called dpaa2_caam.
7050 +
7051 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7052 +       def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7053 +                     CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7054 +                     CRYPTO_DEV_FSL_DPAA2_CAAM)
7055 +
7056 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
7057 +       def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
7058 +                     CRYPTO_DEV_FSL_DPAA2_CAAM)
7059 --- a/drivers/crypto/caam/Makefile
7060 +++ b/drivers/crypto/caam/Makefile
7061 @@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7062         ccflags-y := -DDEBUG
7063  endif
7064  
7065 +ccflags-y += -DVERSION=\"\"
7066 +
7067 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7068  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7069  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7070  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7071 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7072 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7073  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7074 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
7075  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7076  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7077  
7078  caam-objs := ctrl.o
7079 -caam_jr-objs := jr.o key_gen.o error.o
7080 +caam_jr-objs := jr.o key_gen.o
7081  caam_pkc-y := caampkc.o pkc_desc.o
7082 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7083 +       ccflags-y += -DCONFIG_CAAM_QI
7084 +       caam-objs += qi.o
7085 +endif
7086 +
7087 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7088 +
7089 +dpaa2_caam-y    := caamalg_qi2.o dpseci.o
7090 --- a/drivers/crypto/caam/caamalg.c
7091 +++ b/drivers/crypto/caam/caamalg.c
7092 @@ -2,6 +2,7 @@
7093   * caam - Freescale FSL CAAM support for crypto API
7094   *
7095   * Copyright 2008-2011 Freescale Semiconductor, Inc.
7096 + * Copyright 2016 NXP
7097   *
7098   * Based on talitos crypto API driver.
7099   *
7100 @@ -53,6 +54,7 @@
7101  #include "error.h"
7102  #include "sg_sw_sec4.h"
7103  #include "key_gen.h"
7104 +#include "caamalg_desc.h"
7105  
7106  /*
7107   * crypto alg
7108 @@ -62,8 +64,6 @@
7109  #define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
7110                                          CTR_RFC3686_NONCE_SIZE + \
7111                                          SHA512_DIGEST_SIZE * 2)
7112 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7113 -#define CAAM_MAX_IV_LENGTH             16
7114  
7115  #define AEAD_DESC_JOB_IO_LEN           (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7116  #define GCM_DESC_JOB_IO_LEN            (AEAD_DESC_JOB_IO_LEN + \
7117 @@ -71,37 +71,6 @@
7118  #define AUTHENC_DESC_JOB_IO_LEN                (AEAD_DESC_JOB_IO_LEN + \
7119                                          CAAM_CMD_SZ * 5)
7120  
7121 -/* length of descriptors text */
7122 -#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
7123 -#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7124 -#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7125 -#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7126 -
7127 -/* Note: Nonce is counted in enckeylen */
7128 -#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
7129 -
7130 -#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
7131 -#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7132 -#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7133 -
7134 -#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
7135 -#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7136 -#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7137 -
7138 -#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
7139 -#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7140 -#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7141 -
7142 -#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
7143 -#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7144 -#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7145 -
7146 -#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
7147 -#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
7148 -                                        20 * CAAM_CMD_SZ)
7149 -#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
7150 -                                        15 * CAAM_CMD_SZ)
7151 -
7152  #define DESC_MAX_USED_BYTES            (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7153  #define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7154  
7155 @@ -112,47 +81,11 @@
7156  #define debug(format, arg...)
7157  #endif
7158  
7159 -#ifdef DEBUG
7160 -#include <linux/highmem.h>
7161 -
7162 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7163 -                       int prefix_type, int rowsize, int groupsize,
7164 -                       struct scatterlist *sg, size_t tlen, bool ascii,
7165 -                       bool may_sleep)
7166 -{
7167 -       struct scatterlist *it;
7168 -       void *it_page;
7169 -       size_t len;
7170 -       void *buf;
7171 -
7172 -       for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7173 -               /*
7174 -                * make sure the scatterlist's page
7175 -                * has a valid virtual memory mapping
7176 -                */
7177 -               it_page = kmap_atomic(sg_page(it));
7178 -               if (unlikely(!it_page)) {
7179 -                       printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7180 -                       return;
7181 -               }
7182 -
7183 -               buf = it_page + it->offset;
7184 -               len = min_t(size_t, tlen, it->length);
7185 -               print_hex_dump(level, prefix_str, prefix_type, rowsize,
7186 -                              groupsize, buf, len, ascii);
7187 -               tlen -= len;
7188 -
7189 -               kunmap_atomic(it_page);
7190 -       }
7191 -}
7192 -#endif
7193 -
7194  static struct list_head alg_list;
7195  
7196  struct caam_alg_entry {
7197         int class1_alg_type;
7198         int class2_alg_type;
7199 -       int alg_op;
7200         bool rfc3686;
7201         bool geniv;
7202  };
7203 @@ -163,302 +96,70 @@ struct caam_aead_alg {
7204         bool registered;
7205  };
7206  
7207 -/* Set DK bit in class 1 operation if shared */
7208 -static inline void append_dec_op1(u32 *desc, u32 type)
7209 -{
7210 -       u32 *jump_cmd, *uncond_jump_cmd;
7211 -
7212 -       /* DK bit is valid only for AES */
7213 -       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7214 -               append_operation(desc, type | OP_ALG_AS_INITFINAL |
7215 -                                OP_ALG_DECRYPT);
7216 -               return;
7217 -       }
7218 -
7219 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7220 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7221 -                        OP_ALG_DECRYPT);
7222 -       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7223 -       set_jump_tgt_here(desc, jump_cmd);
7224 -       append_operation(desc, type | OP_ALG_AS_INITFINAL |
7225 -                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7226 -       set_jump_tgt_here(desc, uncond_jump_cmd);
7227 -}
7228 -
7229 -/*
7230 - * For aead functions, read payload and write payload,
7231 - * both of which are specified in req->src and req->dst
7232 - */
7233 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7234 -{
7235 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7236 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7237 -                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7238 -}
7239 -
7240 -/*
7241 - * For ablkcipher encrypt and decrypt, read from req->src and
7242 - * write to req->dst
7243 - */
7244 -static inline void ablkcipher_append_src_dst(u32 *desc)
7245 -{
7246 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7247 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7248 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7249 -                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7250 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7251 -}
7252 -
7253  /*
7254   * per-session context
7255   */
7256  struct caam_ctx {
7257 -       struct device *jrdev;
7258         u32 sh_desc_enc[DESC_MAX_USED_LEN];
7259         u32 sh_desc_dec[DESC_MAX_USED_LEN];
7260         u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7261 +       u8 key[CAAM_MAX_KEY_SIZE];
7262         dma_addr_t sh_desc_enc_dma;
7263         dma_addr_t sh_desc_dec_dma;
7264         dma_addr_t sh_desc_givenc_dma;
7265 -       u32 class1_alg_type;
7266 -       u32 class2_alg_type;
7267 -       u32 alg_op;
7268 -       u8 key[CAAM_MAX_KEY_SIZE];
7269         dma_addr_t key_dma;
7270 -       unsigned int enckeylen;
7271 -       unsigned int split_key_len;
7272 -       unsigned int split_key_pad_len;
7273 +       struct device *jrdev;
7274 +       struct alginfo adata;
7275 +       struct alginfo cdata;
7276         unsigned int authsize;
7277  };
7278  
7279 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7280 -                           int keys_fit_inline, bool is_rfc3686)
7281 -{
7282 -       u32 *nonce;
7283 -       unsigned int enckeylen = ctx->enckeylen;
7284 -
7285 -       /*
7286 -        * RFC3686 specific:
7287 -        *      | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7288 -        *      | enckeylen = encryption key size + nonce size
7289 -        */
7290 -       if (is_rfc3686)
7291 -               enckeylen -= CTR_RFC3686_NONCE_SIZE;
7292 -
7293 -       if (keys_fit_inline) {
7294 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7295 -                                 ctx->split_key_len, CLASS_2 |
7296 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7297 -               append_key_as_imm(desc, (void *)ctx->key +
7298 -                                 ctx->split_key_pad_len, enckeylen,
7299 -                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7300 -       } else {
7301 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7302 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7303 -               append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7304 -                          enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7305 -       }
7306 -
7307 -       /* Load Counter into CONTEXT1 reg */
7308 -       if (is_rfc3686) {
7309 -               nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7310 -                              enckeylen);
7311 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7312 -                                  LDST_CLASS_IND_CCB |
7313 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7314 -               append_move(desc,
7315 -                           MOVE_SRC_OUTFIFO |
7316 -                           MOVE_DEST_CLASS1CTX |
7317 -                           (16 << MOVE_OFFSET_SHIFT) |
7318 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7319 -       }
7320 -}
7321 -
7322 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7323 -                                 int keys_fit_inline, bool is_rfc3686)
7324 -{
7325 -       u32 *key_jump_cmd;
7326 -
7327 -       /* Note: Context registers are saved. */
7328 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7329 -
7330 -       /* Skip if already shared */
7331 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7332 -                                  JUMP_COND_SHRD);
7333 -
7334 -       append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7335 -
7336 -       set_jump_tgt_here(desc, key_jump_cmd);
7337 -}
7338 -
7339  static int aead_null_set_sh_desc(struct crypto_aead *aead)
7340  {
7341         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7342         struct device *jrdev = ctx->jrdev;
7343 -       bool keys_fit_inline = false;
7344 -       u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7345 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7346         u32 *desc;
7347 +       int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7348 +                       ctx->adata.keylen_pad;
7349  
7350         /*
7351          * Job Descriptor and Shared Descriptors
7352          * must all fit into the 64-word Descriptor h/w Buffer
7353          */
7354 -       if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7355 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7356 -               keys_fit_inline = true;
7357 +       if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7358 +               ctx->adata.key_inline = true;
7359 +               ctx->adata.key_virt = ctx->key;
7360 +       } else {
7361 +               ctx->adata.key_inline = false;
7362 +               ctx->adata.key_dma = ctx->key_dma;
7363 +       }
7364  
7365         /* aead_encrypt shared descriptor */
7366         desc = ctx->sh_desc_enc;
7367 -
7368 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7369 -
7370 -       /* Skip if already shared */
7371 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7372 -                                  JUMP_COND_SHRD);
7373 -       if (keys_fit_inline)
7374 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7375 -                                 ctx->split_key_len, CLASS_2 |
7376 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7377 -       else
7378 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7379 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7380 -       set_jump_tgt_here(desc, key_jump_cmd);
7381 -
7382 -       /* assoclen + cryptlen = seqinlen */
7383 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7384 -
7385 -       /* Prepare to read and write cryptlen + assoclen bytes */
7386 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7387 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7388 -
7389 -       /*
7390 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7391 -        * thus need to do some magic, i.e. self-patch the descriptor
7392 -        * buffer.
7393 -        */
7394 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7395 -                                   MOVE_DEST_MATH3 |
7396 -                                   (0x6 << MOVE_LEN_SHIFT));
7397 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7398 -                                    MOVE_DEST_DESCBUF |
7399 -                                    MOVE_WAITCOMP |
7400 -                                    (0x8 << MOVE_LEN_SHIFT));
7401 -
7402 -       /* Class 2 operation */
7403 -       append_operation(desc, ctx->class2_alg_type |
7404 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7405 -
7406 -       /* Read and write cryptlen bytes */
7407 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7408 -
7409 -       set_move_tgt_here(desc, read_move_cmd);
7410 -       set_move_tgt_here(desc, write_move_cmd);
7411 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7412 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7413 -                   MOVE_AUX_LS);
7414 -
7415 -       /* Write ICV */
7416 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7417 -                        LDST_SRCDST_BYTE_CONTEXT);
7418 -
7419 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7420 -                                             desc_bytes(desc),
7421 -                                             DMA_TO_DEVICE);
7422 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7423 -               dev_err(jrdev, "unable to map shared descriptor\n");
7424 -               return -ENOMEM;
7425 -       }
7426 -#ifdef DEBUG
7427 -       print_hex_dump(KERN_ERR,
7428 -                      "aead null enc shdesc@"__stringify(__LINE__)": ",
7429 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7430 -                      desc_bytes(desc), 1);
7431 -#endif
7432 +       cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
7433 +                                   ctrlpriv->era);
7434 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7435 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7436  
7437         /*
7438          * Job Descriptor and Shared Descriptors
7439          * must all fit into the 64-word Descriptor h/w Buffer
7440          */
7441 -       keys_fit_inline = false;
7442 -       if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7443 -           ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7444 -               keys_fit_inline = true;
7445 -
7446 -       desc = ctx->sh_desc_dec;
7447 +       if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7448 +               ctx->adata.key_inline = true;
7449 +               ctx->adata.key_virt = ctx->key;
7450 +       } else {
7451 +               ctx->adata.key_inline = false;
7452 +               ctx->adata.key_dma = ctx->key_dma;
7453 +       }
7454  
7455         /* aead_decrypt shared descriptor */
7456 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7457 -
7458 -       /* Skip if already shared */
7459 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7460 -                                  JUMP_COND_SHRD);
7461 -       if (keys_fit_inline)
7462 -               append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7463 -                                 ctx->split_key_len, CLASS_2 |
7464 -                                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
7465 -       else
7466 -               append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7467 -                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
7468 -       set_jump_tgt_here(desc, key_jump_cmd);
7469 -
7470 -       /* Class 2 operation */
7471 -       append_operation(desc, ctx->class2_alg_type |
7472 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7473 -
7474 -       /* assoclen + cryptlen = seqoutlen */
7475 -       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7476 -
7477 -       /* Prepare to read and write cryptlen + assoclen bytes */
7478 -       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7479 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7480 -
7481 -       /*
7482 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
7483 -        * thus need to do some magic, i.e. self-patch the descriptor
7484 -        * buffer.
7485 -        */
7486 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7487 -                                   MOVE_DEST_MATH2 |
7488 -                                   (0x6 << MOVE_LEN_SHIFT));
7489 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7490 -                                    MOVE_DEST_DESCBUF |
7491 -                                    MOVE_WAITCOMP |
7492 -                                    (0x8 << MOVE_LEN_SHIFT));
7493 -
7494 -       /* Read and write cryptlen bytes */
7495 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7496 -
7497 -       /*
7498 -        * Insert a NOP here, since we need at least 4 instructions between
7499 -        * code patching the descriptor buffer and the location being patched.
7500 -        */
7501 -       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7502 -       set_jump_tgt_here(desc, jump_cmd);
7503 -
7504 -       set_move_tgt_here(desc, read_move_cmd);
7505 -       set_move_tgt_here(desc, write_move_cmd);
7506 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7507 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7508 -                   MOVE_AUX_LS);
7509 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7510 -
7511 -       /* Load ICV */
7512 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7513 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7514 -
7515 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7516 -                                             desc_bytes(desc),
7517 -                                             DMA_TO_DEVICE);
7518 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7519 -               dev_err(jrdev, "unable to map shared descriptor\n");
7520 -               return -ENOMEM;
7521 -       }
7522 -#ifdef DEBUG
7523 -       print_hex_dump(KERN_ERR,
7524 -                      "aead null dec shdesc@"__stringify(__LINE__)": ",
7525 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7526 -                      desc_bytes(desc), 1);
7527 -#endif
7528 +       desc = ctx->sh_desc_dec;
7529 +       cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
7530 +                                   ctrlpriv->era);
7531 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7532 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7533  
7534         return 0;
7535  }
7536 @@ -470,11 +171,12 @@ static int aead_set_sh_desc(struct crypt
7537         unsigned int ivsize = crypto_aead_ivsize(aead);
7538         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7539         struct device *jrdev = ctx->jrdev;
7540 -       bool keys_fit_inline;
7541 -       u32 geniv, moveiv;
7542 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7543         u32 ctx1_iv_off = 0;
7544 -       u32 *desc;
7545 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7546 +       u32 *desc, *nonce = NULL;
7547 +       u32 inl_mask;
7548 +       unsigned int data_len[2];
7549 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7550                                OP_ALG_AAI_CTR_MOD128);
7551         const bool is_rfc3686 = alg->caam.rfc3686;
7552  
7553 @@ -482,7 +184,7 @@ static int aead_set_sh_desc(struct crypt
7554                 return 0;
7555  
7556         /* NULL encryption / decryption */
7557 -       if (!ctx->enckeylen)
7558 +       if (!ctx->cdata.keylen)
7559                 return aead_null_set_sh_desc(aead);
7560  
7561         /*
7562 @@ -497,8 +199,14 @@ static int aead_set_sh_desc(struct crypt
7563          * RFC3686 specific:
7564          *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7565          */
7566 -       if (is_rfc3686)
7567 +       if (is_rfc3686) {
7568                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7569 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7570 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7571 +       }
7572 +
7573 +       data_len[0] = ctx->adata.keylen_pad;
7574 +       data_len[1] = ctx->cdata.keylen;
7575  
7576         if (alg->caam.geniv)
7577                 goto skip_enc;
7578 @@ -507,146 +215,64 @@ static int aead_set_sh_desc(struct crypt
7579          * Job Descriptor and Shared Descriptors
7580          * must all fit into the 64-word Descriptor h/w Buffer
7581          */
7582 -       keys_fit_inline = false;
7583 -       if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7584 -           ctx->split_key_pad_len + ctx->enckeylen +
7585 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7586 -           CAAM_DESC_BYTES_MAX)
7587 -               keys_fit_inline = true;
7588 -
7589 -       /* aead_encrypt shared descriptor */
7590 -       desc = ctx->sh_desc_enc;
7591 -
7592 -       /* Note: Context registers are saved. */
7593 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7594 -
7595 -       /* Class 2 operation */
7596 -       append_operation(desc, ctx->class2_alg_type |
7597 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7598 -
7599 -       /* Read and write assoclen bytes */
7600 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7601 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7602 +       if (desc_inline_query(DESC_AEAD_ENC_LEN +
7603 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7604 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7605 +                             ARRAY_SIZE(data_len)) < 0)
7606 +               return -EINVAL;
7607  
7608 -       /* Skip assoc data */
7609 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7610 +       if (inl_mask & 1)
7611 +               ctx->adata.key_virt = ctx->key;
7612 +       else
7613 +               ctx->adata.key_dma = ctx->key_dma;
7614  
7615 -       /* read assoc before reading payload */
7616 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7617 -                                     FIFOLDST_VLF);
7618 +       if (inl_mask & 2)
7619 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7620 +       else
7621 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7622  
7623 -       /* Load Counter into CONTEXT1 reg */
7624 -       if (is_rfc3686)
7625 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7626 -                                    LDST_SRCDST_BYTE_CONTEXT |
7627 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7628 -                                     LDST_OFFSET_SHIFT));
7629 -
7630 -       /* Class 1 operation */
7631 -       append_operation(desc, ctx->class1_alg_type |
7632 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7633 -
7634 -       /* Read and write cryptlen bytes */
7635 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7636 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7637 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7638 -
7639 -       /* Write ICV */
7640 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7641 -                        LDST_SRCDST_BYTE_CONTEXT);
7642 +       ctx->adata.key_inline = !!(inl_mask & 1);
7643 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7644  
7645 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7646 -                                             desc_bytes(desc),
7647 -                                             DMA_TO_DEVICE);
7648 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7649 -               dev_err(jrdev, "unable to map shared descriptor\n");
7650 -               return -ENOMEM;
7651 -       }
7652 -#ifdef DEBUG
7653 -       print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7654 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7655 -                      desc_bytes(desc), 1);
7656 -#endif
7657 +       /* aead_encrypt shared descriptor */
7658 +       desc = ctx->sh_desc_enc;
7659 +       cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7660 +                              ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7661 +                              false, ctrlpriv->era);
7662 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7663 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7664  
7665  skip_enc:
7666         /*
7667          * Job Descriptor and Shared Descriptors
7668          * must all fit into the 64-word Descriptor h/w Buffer
7669          */
7670 -       keys_fit_inline = false;
7671 -       if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7672 -           ctx->split_key_pad_len + ctx->enckeylen +
7673 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7674 -           CAAM_DESC_BYTES_MAX)
7675 -               keys_fit_inline = true;
7676 -
7677 -       /* aead_decrypt shared descriptor */
7678 -       desc = ctx->sh_desc_dec;
7679 -
7680 -       /* Note: Context registers are saved. */
7681 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7682 -
7683 -       /* Class 2 operation */
7684 -       append_operation(desc, ctx->class2_alg_type |
7685 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7686 +       if (desc_inline_query(DESC_AEAD_DEC_LEN +
7687 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7688 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7689 +                             ARRAY_SIZE(data_len)) < 0)
7690 +               return -EINVAL;
7691  
7692 -       /* Read and write assoclen bytes */
7693 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7694 -       if (alg->caam.geniv)
7695 -               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7696 +       if (inl_mask & 1)
7697 +               ctx->adata.key_virt = ctx->key;
7698         else
7699 -               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7700 -
7701 -       /* Skip assoc data */
7702 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7703 -
7704 -       /* read assoc before reading payload */
7705 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7706 -                            KEY_VLF);
7707 -
7708 -       if (alg->caam.geniv) {
7709 -               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7710 -                               LDST_SRCDST_BYTE_CONTEXT |
7711 -                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
7712 -               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7713 -                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7714 -       }
7715 -
7716 -       /* Load Counter into CONTEXT1 reg */
7717 -       if (is_rfc3686)
7718 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7719 -                                    LDST_SRCDST_BYTE_CONTEXT |
7720 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7721 -                                     LDST_OFFSET_SHIFT));
7722 +               ctx->adata.key_dma = ctx->key_dma;
7723  
7724 -       /* Choose operation */
7725 -       if (ctr_mode)
7726 -               append_operation(desc, ctx->class1_alg_type |
7727 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7728 +       if (inl_mask & 2)
7729 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7730         else
7731 -               append_dec_op1(desc, ctx->class1_alg_type);
7732 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7733  
7734 -       /* Read and write cryptlen bytes */
7735 -       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7736 -       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7737 -       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7738 -
7739 -       /* Load ICV */
7740 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7741 -                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7742 +       ctx->adata.key_inline = !!(inl_mask & 1);
7743 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7744  
7745 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7746 -                                             desc_bytes(desc),
7747 -                                             DMA_TO_DEVICE);
7748 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7749 -               dev_err(jrdev, "unable to map shared descriptor\n");
7750 -               return -ENOMEM;
7751 -       }
7752 -#ifdef DEBUG
7753 -       print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7754 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7755 -                      desc_bytes(desc), 1);
7756 -#endif
7757 +       /* aead_decrypt shared descriptor */
7758 +       desc = ctx->sh_desc_dec;
7759 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7760 +                              ctx->authsize, alg->caam.geniv, is_rfc3686,
7761 +                              nonce, ctx1_iv_off, false, ctrlpriv->era);
7762 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7763 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7764  
7765         if (!alg->caam.geniv)
7766                 goto skip_givenc;
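
The two desc_inline_query() calls above replace the removed open-coded keys_fit_inline checks. The helper takes the descriptor's fixed command length, the job-descriptor I/O overhead, and an array of key lengths (padded split authentication key first, encryption key second), and returns a bitmask saying which keys still fit immediately inside the 64-word descriptor buffer; bit 0 selects adata.key_virt over adata.key_dma and bit 1 does the same for cdata. A minimal standalone sketch of that decision, with invented lengths and a simplified budget rule (the real helper in desc_constr.h also reserves pointer-sized slots for keys that are not inlined):

#include <stdio.h>

#define CAAM_DESC_BYTES_MAX 256 /* 64 words x 4 bytes */

static int inline_query_sketch(unsigned int sd_base_len, unsigned int jd_len,
                               const unsigned int *data_len,
                               unsigned int *inl_mask, unsigned int count)
{
        int rem = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
        unsigned int i;

        *inl_mask = 0;
        for (i = 0; i < count && rem > 0; i++) {
                if (rem - (int)data_len[i] >= 0) {
                        rem -= (int)data_len[i];
                        *inl_mask |= 1u << i; /* bit 0: adata, bit 1: cdata */
                }
        }
        return rem >= 0 ? 0 : -1;
}

int main(void)
{
        unsigned int data_len[2] = { 64, 32 }; /* padded auth key, enc key */
        unsigned int inl_mask;

        if (inline_query_sketch(88, 40, data_len, &inl_mask, 2) == 0)
                printf("adata inline: %u, cdata inline: %u\n",
                       inl_mask & 1, (inl_mask >> 1) & 1);
        return 0;
}

With the example budget both keys fit inline; shrinking CAAM_DESC_BYTES_MAX demonstrates the fallback to DMA key references.
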
7767 @@ -655,107 +281,32 @@ skip_enc:
7768          * Job Descriptor and Shared Descriptors
7769          * must all fit into the 64-word Descriptor h/w Buffer
7770          */
7771 -       keys_fit_inline = false;
7772 -       if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7773 -           ctx->split_key_pad_len + ctx->enckeylen +
7774 -           (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7775 -           CAAM_DESC_BYTES_MAX)
7776 -               keys_fit_inline = true;
7777 -
7778 -       /* aead_givencrypt shared descriptor */
7779 -       desc = ctx->sh_desc_enc;
7780 -
7781 -       /* Note: Context registers are saved. */
7782 -       init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7783 +       if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7784 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7785 +                             AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7786 +                             ARRAY_SIZE(data_len)) < 0)
7787 +               return -EINVAL;
7788  
7789 -       if (is_rfc3686)
7790 -               goto copy_iv;
7791 +       if (inl_mask & 1)
7792 +               ctx->adata.key_virt = ctx->key;
7793 +       else
7794 +               ctx->adata.key_dma = ctx->key_dma;
7795  
7796 -       /* Generate IV */
7797 -       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7798 -               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7799 -               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7800 -       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7801 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7802 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7803 -       append_move(desc, MOVE_WAITCOMP |
7804 -                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7805 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7806 -                   (ivsize << MOVE_LEN_SHIFT));
7807 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7808 -
7809 -copy_iv:
7810 -       /* Copy IV to class 1 context */
7811 -       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7812 -                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7813 -                   (ivsize << MOVE_LEN_SHIFT));
7814 -
7815 -       /* Return to encryption */
7816 -       append_operation(desc, ctx->class2_alg_type |
7817 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7818 -
7819 -       /* Read and write assoclen bytes */
7820 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7821 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7822 -
7823 -       /* ivsize + cryptlen = seqoutlen - authsize */
7824 -       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7825 -
7826 -       /* Skip assoc data */
7827 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7828 -
7829 -       /* read assoc before reading payload */
7830 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7831 -                            KEY_VLF);
7832 -
7833 -       /* Copy iv from outfifo to class 2 fifo */
7834 -       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7835 -                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7836 -       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7837 -                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7838 -       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7839 -                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7840 +       if (inl_mask & 2)
7841 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7842 +       else
7843 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7844  
7845 -       /* Load Counter into CONTEXT1 reg */
7846 -       if (is_rfc3686)
7847 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7848 -                                    LDST_SRCDST_BYTE_CONTEXT |
7849 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7850 -                                     LDST_OFFSET_SHIFT));
7851 -
7852 -       /* Class 1 operation */
7853 -       append_operation(desc, ctx->class1_alg_type |
7854 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7855 -
7856 -       /* Will write ivsize + cryptlen */
7857 -       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7858 -
7859 -       /* Not need to reload iv */
7860 -       append_seq_fifo_load(desc, ivsize,
7861 -                            FIFOLD_CLASS_SKIP);
7862 -
7863 -       /* Will read cryptlen */
7864 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7865 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7866 -                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7867 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7868 -
7869 -       /* Write ICV */
7870 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7871 -                        LDST_SRCDST_BYTE_CONTEXT);
7872 +       ctx->adata.key_inline = !!(inl_mask & 1);
7873 +       ctx->cdata.key_inline = !!(inl_mask & 2);
7874  
7875 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7876 -                                             desc_bytes(desc),
7877 -                                             DMA_TO_DEVICE);
7878 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7879 -               dev_err(jrdev, "unable to map shared descriptor\n");
7880 -               return -ENOMEM;
7881 -       }
7882 -#ifdef DEBUG
7883 -       print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7884 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
7885 -                      desc_bytes(desc), 1);
7886 -#endif
7887 +       /* aead_givencrypt shared descriptor */
7888 +       desc = ctx->sh_desc_enc;
7889 +       cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7890 +                                 ctx->authsize, is_rfc3686, nonce,
7891 +                                 ctx1_iv_off, false, ctrlpriv->era);
7892 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7893 +                                  desc_bytes(desc), DMA_TO_DEVICE);
7894  
7895  skip_givenc:
7896         return 0;
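
Throughout these hunks, the dma_map_single()/dma_mapping_error() pairs around each freshly built shared descriptor disappear in favor of a single dma_sync_single_for_device() call. That only works because, elsewhere in this patch, the descriptor buffers are DMA-mapped once for the lifetime of the context and merely rewritten in place on each setkey/setauthsize. A minimal sketch of that lifecycle, with stand-in functions in place of the real DMA API:

#include <stdio.h>
#include <string.h>

/* Stand-ins for the DMA API; only the lifecycle matters here. */
typedef unsigned long dma_addr_t;
static dma_addr_t dma_map_once(void *cpu) { return (dma_addr_t)cpu; }
static void dma_sync_for_device(dma_addr_t handle, size_t len)
{
        (void)handle; (void)len; /* would flush CPU caches toward the device */
}

struct ctx {
        unsigned int sh_desc_enc[64]; /* 64-word descriptor buffer */
        dma_addr_t sh_desc_enc_dma;   /* mapped once for the ctx lifetime */
};

int main(void)
{
        struct ctx c;
        unsigned int desc[4] = { 0xB0800004u, 0, 0, 0 }; /* dummy contents */

        c.sh_desc_enc_dma = dma_map_once(c.sh_desc_enc); /* at tfm init */

        /* per setkey/setauthsize: rebuild in place, then sync, no remap */
        memcpy(c.sh_desc_enc, desc, sizeof(desc));
        dma_sync_for_device(c.sh_desc_enc_dma, sizeof(desc));
        puts("descriptor updated without remapping");
        return 0;
}

Besides removing an error path from every set_sh_desc function, this avoids leaking or double-mapping the buffers when keys are changed repeatedly.
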
7897 @@ -776,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto
7898  {
7899         struct caam_ctx *ctx = crypto_aead_ctx(aead);
7900         struct device *jrdev = ctx->jrdev;
7901 -       bool keys_fit_inline = false;
7902 -       u32 *key_jump_cmd, *zero_payload_jump_cmd,
7903 -           *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7904 +       unsigned int ivsize = crypto_aead_ivsize(aead);
7905         u32 *desc;
7906 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7907 +                       ctx->cdata.keylen;
7908  
7909 -       if (!ctx->enckeylen || !ctx->authsize)
7910 +       if (!ctx->cdata.keylen || !ctx->authsize)
7911                 return 0;
7912  
7913         /*
7914 @@ -789,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto
7915          * Job Descriptor and Shared Descriptor
7916          * must fit into the 64-word Descriptor h/w Buffer
7917          */
7918 -       if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7919 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7920 -               keys_fit_inline = true;
7921 +       if (rem_bytes >= DESC_GCM_ENC_LEN) {
7922 +               ctx->cdata.key_inline = true;
7923 +               ctx->cdata.key_virt = ctx->key;
7924 +       } else {
7925 +               ctx->cdata.key_inline = false;
7926 +               ctx->cdata.key_dma = ctx->key_dma;
7927 +       }
7928  
7929         desc = ctx->sh_desc_enc;
7930 -
7931 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
7932 -
7933 -       /* skip key loading if they are loaded due to sharing */
7934 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7935 -                                  JUMP_COND_SHRD | JUMP_COND_SELF);
7936 -       if (keys_fit_inline)
7937 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7938 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7939 -       else
7940 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
7941 -                          CLASS_1 | KEY_DEST_CLASS_REG);
7942 -       set_jump_tgt_here(desc, key_jump_cmd);
7943 -
7944 -       /* class 1 operation */
7945 -       append_operation(desc, ctx->class1_alg_type |
7946 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7947 -
7948 -       /* if assoclen + cryptlen is ZERO, skip to ICV write */
7949 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7950 -       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7951 -                                                JUMP_COND_MATH_Z);
7952 -
7953 -       /* if assoclen is ZERO, skip reading the assoc data */
7954 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7955 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7956 -                                                JUMP_COND_MATH_Z);
7957 -
7958 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7959 -
7960 -       /* skip assoc data */
7961 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7962 -
7963 -       /* cryptlen = seqinlen - assoclen */
7964 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7965 -
7966 -       /* if cryptlen is ZERO jump to zero-payload commands */
7967 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7968 -                                           JUMP_COND_MATH_Z);
7969 -
7970 -       /* read assoc data */
7971 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7972 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7973 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7974 -
7975 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7976 -
7977 -       /* write encrypted data */
7978 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7979 -
7980 -       /* read payload data */
7981 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7982 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7983 -
7984 -       /* jump the zero-payload commands */
7985 -       append_jump(desc, JUMP_TEST_ALL | 2);
7986 -
7987 -       /* zero-payload commands */
7988 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
7989 -
7990 -       /* read assoc data */
7991 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7992 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7993 -
7994 -       /* There is no input data */
7995 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7996 -
7997 -       /* write ICV */
7998 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7999 -                        LDST_SRCDST_BYTE_CONTEXT);
8000 -
8001 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8002 -                                             desc_bytes(desc),
8003 -                                             DMA_TO_DEVICE);
8004 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8005 -               dev_err(jrdev, "unable to map shared descriptor\n");
8006 -               return -ENOMEM;
8007 -       }
8008 -#ifdef DEBUG
8009 -       print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8010 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8011 -                      desc_bytes(desc), 1);
8012 -#endif
8013 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8014 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8015 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8016  
8017         /*
8018          * Job Descriptor and Shared Descriptors
8019          * must all fit into the 64-word Descriptor h/w Buffer
8020          */
8021 -       keys_fit_inline = false;
8022 -       if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8023 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8024 -               keys_fit_inline = true;
8025 +       if (rem_bytes >= DESC_GCM_DEC_LEN) {
8026 +               ctx->cdata.key_inline = true;
8027 +               ctx->cdata.key_virt = ctx->key;
8028 +       } else {
8029 +               ctx->cdata.key_inline = false;
8030 +               ctx->cdata.key_dma = ctx->key_dma;
8031 +       }
8032  
8033         desc = ctx->sh_desc_dec;
8034 -
8035 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8036 -
8037 -       /* skip key loading if they are loaded due to sharing */
8038 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8039 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD |
8040 -                                  JUMP_COND_SELF);
8041 -       if (keys_fit_inline)
8042 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8043 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8044 -       else
8045 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8046 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8047 -       set_jump_tgt_here(desc, key_jump_cmd);
8048 -
8049 -       /* class 1 operation */
8050 -       append_operation(desc, ctx->class1_alg_type |
8051 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8052 -
8053 -       /* if assoclen is ZERO, skip reading the assoc data */
8054 -       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8055 -       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8056 -                                                JUMP_COND_MATH_Z);
8057 -
8058 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8059 -
8060 -       /* skip assoc data */
8061 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8062 -
8063 -       /* read assoc data */
8064 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8065 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8066 -
8067 -       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8068 -
8069 -       /* cryptlen = seqoutlen - assoclen */
8070 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8071 -
8072 -       /* jump to zero-payload command if cryptlen is zero */
8073 -       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8074 -                                           JUMP_COND_MATH_Z);
8075 -
8076 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8077 -
8078 -       /* store encrypted data */
8079 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8080 -
8081 -       /* read payload data */
8082 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8083 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8084 -
8085 -       /* zero-payload command */
8086 -       set_jump_tgt_here(desc, zero_payload_jump_cmd);
8087 -
8088 -       /* read ICV */
8089 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8090 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8091 -
8092 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8093 -                                             desc_bytes(desc),
8094 -                                             DMA_TO_DEVICE);
8095 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8096 -               dev_err(jrdev, "unable to map shared descriptor\n");
8097 -               return -ENOMEM;
8098 -       }
8099 -#ifdef DEBUG
8100 -       print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8101 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8102 -                      desc_bytes(desc), 1);
8103 -#endif
8104 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8105 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8106 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8107  
8108         return 0;
8109  }
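
gcm_set_sh_desc() above now folds the old keys_fit_inline test into one rem_bytes budget: the 64-word (256-byte) descriptor buffer, minus the job-descriptor I/O overhead, minus the key length, compared against the per-direction descriptor body length. A worked example with placeholder values for the two length macros (only CAAM_DESC_BYTES_MAX = 256 is a known constant here):

#include <stdio.h>

#define CAAM_DESC_BYTES_MAX  256  /* 64 words x 4 bytes */
#define GCM_DESC_JOB_IO_LEN   60  /* assumed for illustration */
#define DESC_GCM_ENC_LEN     100  /* assumed for illustration */

int main(void)
{
        int keylen = 32; /* AES-256 */
        int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - keylen;

        if (rem_bytes >= DESC_GCM_ENC_LEN)
                printf("key inlined in the shared descriptor (%d spare)\n",
                       rem_bytes - DESC_GCM_ENC_LEN);
        else
                printf("key referenced via DMA pointer (%d short)\n",
                       DESC_GCM_ENC_LEN - rem_bytes);
        return 0;
}

The same rem_bytes value is reused for the decrypt direction below, so the encrypt and decrypt descriptors can make independent inline/DMA choices from one computation.
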
8110 @@ -976,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr
8111  {
8112         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8113         struct device *jrdev = ctx->jrdev;
8114 -       bool keys_fit_inline = false;
8115 -       u32 *key_jump_cmd;
8116 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8117         u32 *desc;
8118 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8119 +                       ctx->cdata.keylen;
8120  
8121 -       if (!ctx->enckeylen || !ctx->authsize)
8122 +       if (!ctx->cdata.keylen || !ctx->authsize)
8123                 return 0;
8124  
8125         /*
8126 @@ -988,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr
8127          * Job Descriptor and Shared Descriptor
8128          * must fit into the 64-word Descriptor h/w Buffer
8129          */
8130 -       if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8131 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8132 -               keys_fit_inline = true;
8133 +       if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8134 +               ctx->cdata.key_inline = true;
8135 +               ctx->cdata.key_virt = ctx->key;
8136 +       } else {
8137 +               ctx->cdata.key_inline = false;
8138 +               ctx->cdata.key_dma = ctx->key_dma;
8139 +       }
8140  
8141         desc = ctx->sh_desc_enc;
8142 -
8143 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8144 -
8145 -       /* Skip key loading if it is loaded due to sharing */
8146 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8147 -                                  JUMP_COND_SHRD);
8148 -       if (keys_fit_inline)
8149 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8150 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8151 -       else
8152 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8153 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8154 -       set_jump_tgt_here(desc, key_jump_cmd);
8155 -
8156 -       /* Class 1 operation */
8157 -       append_operation(desc, ctx->class1_alg_type |
8158 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8159 -
8160 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8161 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8162 -
8163 -       /* Read assoc data */
8164 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8165 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8166 -
8167 -       /* Skip IV */
8168 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8169 -
8170 -       /* Will read cryptlen bytes */
8171 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8172 -
8173 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8174 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8175 -
8176 -       /* Skip assoc data */
8177 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8178 -
8179 -       /* cryptlen = seqoutlen - assoclen */
8180 -       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8181 -
8182 -       /* Write encrypted data */
8183 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8184 -
8185 -       /* Read payload data */
8186 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8187 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8188 -
8189 -       /* Write ICV */
8190 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8191 -                        LDST_SRCDST_BYTE_CONTEXT);
8192 -
8193 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8194 -                                             desc_bytes(desc),
8195 -                                             DMA_TO_DEVICE);
8196 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8197 -               dev_err(jrdev, "unable to map shared descriptor\n");
8198 -               return -ENOMEM;
8199 -       }
8200 -#ifdef DEBUG
8201 -       print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8202 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8203 -                      desc_bytes(desc), 1);
8204 -#endif
8205 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8206 +                                 false);
8207 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8208 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8209  
8210         /*
8211          * Job Descriptor and Shared Descriptors
8212          * must all fit into the 64-word Descriptor h/w Buffer
8213          */
8214 -       keys_fit_inline = false;
8215 -       if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8216 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8217 -               keys_fit_inline = true;
8218 +       if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8219 +               ctx->cdata.key_inline = true;
8220 +               ctx->cdata.key_virt = ctx->key;
8221 +       } else {
8222 +               ctx->cdata.key_inline = false;
8223 +               ctx->cdata.key_dma = ctx->key_dma;
8224 +       }
8225  
8226         desc = ctx->sh_desc_dec;
8227 -
8228 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8229 -
8230 -       /* Skip key loading if it is loaded due to sharing */
8231 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8232 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8233 -       if (keys_fit_inline)
8234 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8235 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8236 -       else
8237 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8238 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8239 -       set_jump_tgt_here(desc, key_jump_cmd);
8240 -
8241 -       /* Class 1 operation */
8242 -       append_operation(desc, ctx->class1_alg_type |
8243 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8244 -
8245 -       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8246 -       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8247 -
8248 -       /* Read assoc data */
8249 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8250 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8251 -
8252 -       /* Skip IV */
8253 -       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8254 -
8255 -       /* Will read cryptlen bytes */
8256 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8257 -
8258 -       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8259 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8260 -
8261 -       /* Skip assoc data */
8262 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8263 -
8264 -       /* Will write cryptlen bytes */
8265 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8266 -
8267 -       /* Store payload data */
8268 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8269 -
8270 -       /* Read encrypted data */
8271 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8272 -                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8273 -
8274 -       /* Read ICV */
8275 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8276 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8277 -
8278 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8279 -                                             desc_bytes(desc),
8280 -                                             DMA_TO_DEVICE);
8281 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8282 -               dev_err(jrdev, "unable to map shared descriptor\n");
8283 -               return -ENOMEM;
8284 -       }
8285 -#ifdef DEBUG
8286 -       print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8287 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8288 -                      desc_bytes(desc), 1);
8289 -#endif
8290 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8291 +                                 false);
8292 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8293 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8294  
8295         return 0;
8296  }
8297 @@ -1149,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr
8298  {
8299         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8300         struct device *jrdev = ctx->jrdev;
8301 -       bool keys_fit_inline = false;
8302 -       u32 *key_jump_cmd;
8303 -       u32 *read_move_cmd, *write_move_cmd;
8304 +       unsigned int ivsize = crypto_aead_ivsize(aead);
8305         u32 *desc;
8306 +       int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8307 +                       ctx->cdata.keylen;
8308  
8309 -       if (!ctx->enckeylen || !ctx->authsize)
8310 +       if (!ctx->cdata.keylen || !ctx->authsize)
8311                 return 0;
8312  
8313         /*
8314 @@ -1162,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr
8315          * Job Descriptor and Shared Descriptor
8316          * must fit into the 64-word Descriptor h/w Buffer
8317          */
8318 -       if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8319 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8320 -               keys_fit_inline = true;
8321 +       if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8322 +               ctx->cdata.key_inline = true;
8323 +               ctx->cdata.key_virt = ctx->key;
8324 +       } else {
8325 +               ctx->cdata.key_inline = false;
8326 +               ctx->cdata.key_dma = ctx->key_dma;
8327 +       }
8328  
8329         desc = ctx->sh_desc_enc;
8330 -
8331 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8332 -
8333 -       /* Skip key loading if it is loaded due to sharing */
8334 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8335 -                                  JUMP_COND_SHRD);
8336 -       if (keys_fit_inline)
8337 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8338 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8339 -       else
8340 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8341 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8342 -       set_jump_tgt_here(desc, key_jump_cmd);
8343 -
8344 -       /* Class 1 operation */
8345 -       append_operation(desc, ctx->class1_alg_type |
8346 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8347 -
8348 -       /* assoclen + cryptlen = seqinlen */
8349 -       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8350 -
8351 -       /*
8352 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8353 -        * thus need to do some magic, i.e. self-patch the descriptor
8354 -        * buffer.
8355 -        */
8356 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8357 -                                   (0x6 << MOVE_LEN_SHIFT));
8358 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8359 -                                    (0x8 << MOVE_LEN_SHIFT));
8360 -
8361 -       /* Will read assoclen + cryptlen bytes */
8362 -       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8363 -
8364 -       /* Will write assoclen + cryptlen bytes */
8365 -       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8366 -
8367 -       /* Read and write assoclen + cryptlen bytes */
8368 -       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8369 -
8370 -       set_move_tgt_here(desc, read_move_cmd);
8371 -       set_move_tgt_here(desc, write_move_cmd);
8372 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8373 -       /* Move payload data to OFIFO */
8374 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8375 -
8376 -       /* Write ICV */
8377 -       append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8378 -                        LDST_SRCDST_BYTE_CONTEXT);
8379 -
8380 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8381 -                                             desc_bytes(desc),
8382 -                                             DMA_TO_DEVICE);
8383 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8384 -               dev_err(jrdev, "unable to map shared descriptor\n");
8385 -               return -ENOMEM;
8386 -       }
8387 -#ifdef DEBUG
8388 -       print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8389 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8390 -                      desc_bytes(desc), 1);
8391 -#endif
8392 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8393 +                                 false);
8394 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8395 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8396  
8397         /*
8398          * Job Descriptor and Shared Descriptors
8399          * must all fit into the 64-word Descriptor h/w Buffer
8400          */
8401 -       keys_fit_inline = false;
8402 -       if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8403 -           ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8404 -               keys_fit_inline = true;
8405 +       if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8406 +               ctx->cdata.key_inline = true;
8407 +               ctx->cdata.key_virt = ctx->key;
8408 +       } else {
8409 +               ctx->cdata.key_inline = false;
8410 +               ctx->cdata.key_dma = ctx->key_dma;
8411 +       }
8412  
8413         desc = ctx->sh_desc_dec;
8414 -
8415 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
8416 -
8417 -       /* Skip key loading if it is loaded due to sharing */
8418 -       key_jump_cmd = append_jump(desc, JUMP_JSL |
8419 -                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
8420 -       if (keys_fit_inline)
8421 -               append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8422 -                                 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8423 -       else
8424 -               append_key(desc, ctx->key_dma, ctx->enckeylen,
8425 -                          CLASS_1 | KEY_DEST_CLASS_REG);
8426 -       set_jump_tgt_here(desc, key_jump_cmd);
8427 -
8428 -       /* Class 1 operation */
8429 -       append_operation(desc, ctx->class1_alg_type |
8430 -                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8431 -
8432 -       /* assoclen + cryptlen = seqoutlen */
8433 -       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8434 -
8435 -       /*
8436 -        * MOVE_LEN opcode is not available in all SEC HW revisions,
8437 -        * thus need to do some magic, i.e. self-patch the descriptor
8438 -        * buffer.
8439 -        */
8440 -       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8441 -                                   (0x6 << MOVE_LEN_SHIFT));
8442 -       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8443 -                                    (0x8 << MOVE_LEN_SHIFT));
8444 -
8445 -       /* Will read assoclen + cryptlen bytes */
8446 -       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8447 -
8448 -       /* Will write assoclen + cryptlen bytes */
8449 -       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8450 -
8451 -       /* Store payload data */
8452 -       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8453 -
8454 -       /* In-snoop assoclen + cryptlen data */
8455 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8456 -                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8457 -
8458 -       set_move_tgt_here(desc, read_move_cmd);
8459 -       set_move_tgt_here(desc, write_move_cmd);
8460 -       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8461 -       /* Move payload data to OFIFO */
8462 -       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8463 -       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8464 -
8465 -       /* Read ICV */
8466 -       append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8467 -                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8468 -
8469 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8470 -                                             desc_bytes(desc),
8471 -                                             DMA_TO_DEVICE);
8472 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8473 -               dev_err(jrdev, "unable to map shared descriptor\n");
8474 -               return -ENOMEM;
8475 -       }
8476 -#ifdef DEBUG
8477 -       print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8478 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8479 -                      desc_bytes(desc), 1);
8480 -#endif
8481 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8482 +                                 false);
8483 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8484 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8485  
8486         return 0;
8487  }
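
The rfc4543 hunks above drop the descriptor self-patching block that worked around SEC revisions lacking the MOVE_LEN opcode: a MOVE command was appended before its offset was known, remembered via read_move_cmd/write_move_cmd, and fixed up afterwards with set_move_tgt_here(). The one-call cnstr_shdsc_rfc4543_* constructors presumably carry the same workaround internally. A tiny standalone illustration of that append-now, patch-later idiom, with invented command encodings:

#include <stdio.h>

#define MOVE_OFFSET_SHIFT 8 /* placeholder shift, not the real field layout */

static unsigned int descbuf[16];
static unsigned int nwords;

static unsigned int *append_cmd_sketch(unsigned int cmd)
{
        descbuf[nwords] = cmd;
        return &descbuf[nwords++];
}

static void set_tgt_here_sketch(unsigned int *cmd)
{
        /* back-patch the saved command with the current write position */
        *cmd |= nwords << MOVE_OFFSET_SHIFT;
}

int main(void)
{
        unsigned int *read_move_cmd;

        read_move_cmd = append_cmd_sketch(0x1000); /* MOVE, offset unknown */
        append_cmd_sketch(0x2000);                 /* intervening commands */
        set_tgt_here_sketch(read_move_cmd);        /* target now known */
        printf("patched MOVE word: 0x%x (target word %u)\n",
               *read_move_cmd, nwords);
        return 0;
}
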
8488 @@ -1322,74 +509,67 @@ static int rfc4543_setauthsize(struct cr
8489         return 0;
8490  }
8491  
8492 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8493 -                             u32 authkeylen)
8494 -{
8495 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8496 -                              ctx->split_key_pad_len, key_in, authkeylen,
8497 -                              ctx->alg_op);
8498 -}
8499 -
8500  static int aead_setkey(struct crypto_aead *aead,
8501                                const u8 *key, unsigned int keylen)
8502  {
8503 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8504 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8505         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8506         struct device *jrdev = ctx->jrdev;
8507 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
8508         struct crypto_authenc_keys keys;
8509         int ret = 0;
8510  
8511         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8512                 goto badkey;
8513  
8514 -       /* Pick class 2 key length from algorithm submask */
8515 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8516 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
8517 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8518 -
8519 -       if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8520 -               goto badkey;
8521 -
8522  #ifdef DEBUG
8523         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8524                keys.authkeylen + keys.enckeylen, keys.enckeylen,
8525                keys.authkeylen);
8526 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8527 -              ctx->split_key_len, ctx->split_key_pad_len);
8528         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8529                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8530  #endif
8531  
8532 -       ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8533 +       /*
8534 +        * If DKP is supported, use it in the shared descriptor to generate
8535 +        * the split key.
8536 +        */
8537 +       if (ctrlpriv->era >= 6) {
8538 +               ctx->adata.keylen = keys.authkeylen;
8539 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8540 +                                                     OP_ALG_ALGSEL_MASK);
8541 +
8542 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8543 +                       goto badkey;
8544 +
8545 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
8546 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
8547 +                      keys.enckeylen);
8548 +               dma_sync_single_for_device(jrdev, ctx->key_dma,
8549 +                                          ctx->adata.keylen_pad +
8550 +                                          keys.enckeylen, DMA_TO_DEVICE);
8551 +               goto skip_split_key;
8552 +       }
8553 +
8554 +       ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8555 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
8556 +                           keys.enckeylen);
8557         if (ret) {
8558                 goto badkey;
8559         }
8560  
8561         /* postpend encryption key to auth split key */
8562 -       memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8563 -
8564 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8565 -                                     keys.enckeylen, DMA_TO_DEVICE);
8566 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8567 -               dev_err(jrdev, "unable to map key i/o memory\n");
8568 -               return -ENOMEM;
8569 -       }
8570 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8571 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8572 +                                  keys.enckeylen, DMA_TO_DEVICE);
8573  #ifdef DEBUG
8574         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8575                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8576 -                      ctx->split_key_pad_len + keys.enckeylen, 1);
8577 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
8578  #endif
8579  
8580 -       ctx->enckeylen = keys.enckeylen;
8581 -
8582 -       ret = aead_set_sh_desc(aead);
8583 -       if (ret) {
8584 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8585 -                                keys.enckeylen, DMA_TO_DEVICE);
8586 -       }
8587 -
8588 -       return ret;
8589 +skip_split_key:
8590 +       ctx->cdata.keylen = keys.enckeylen;
8591 +       return aead_set_sh_desc(aead);
8592  badkey:
8593         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8594         return -EINVAL;
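
The rewritten aead_setkey() above keeps a single key buffer with the padded authentication key at offset 0 and the encryption key right after it at adata.keylen_pad. On era >= 6 parts the raw authentication key is stored as-is and the shared descriptor derives the split key itself (the DKP path); on older parts gen_split_key() must first run a job on the engine to deposit the split key in that slot. A sketch of the two layouts, with an assumed split_key_len() result of 64 bytes:

#include <stdio.h>
#include <string.h>

#define CAAM_MAX_KEY_SIZE 128

struct key_ctx {
        unsigned char key[CAAM_MAX_KEY_SIZE];
        unsigned int keylen_pad; /* padded auth (split) key length */
        unsigned int enckeylen;
};

static int layout_keys(struct key_ctx *ctx, int era,
                       const unsigned char *authkey, unsigned int authkeylen,
                       const unsigned char *enckey, unsigned int enckeylen)
{
        ctx->keylen_pad = 64; /* assumed split_key_len() result */
        if (ctx->keylen_pad + enckeylen > CAAM_MAX_KEY_SIZE)
                return -1;

        if (era >= 6) {
                /* DKP: raw auth key at offset 0; the descriptor derives
                 * the split key in place, so the padded slot just needs
                 * to be reserved. */
                memcpy(ctx->key, authkey, authkeylen);
        } else {
                /* legacy: gen_split_key() would run an engine job here
                 * to fill ctx->key[0..keylen_pad) with the split key */
        }
        /* either way, the encryption key follows the padded slot */
        memcpy(ctx->key + ctx->keylen_pad, enckey, enckeylen);
        ctx->enckeylen = enckeylen;
        return 0;
}

int main(void)
{
        struct key_ctx ctx;
        unsigned char akey[20] = {0}, ekey[16] = {0};

        layout_keys(&ctx, 6, akey, sizeof(akey), ekey, sizeof(ekey));
        printf("enc key at offset %u\n", ctx.keylen_pad);
        return 0;
}
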
8595 @@ -1400,7 +580,6 @@ static int gcm_setkey(struct crypto_aead
8596  {
8597         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8598         struct device *jrdev = ctx->jrdev;
8599 -       int ret = 0;
8600  
8601  #ifdef DEBUG
8602         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8603 @@ -1408,21 +587,10 @@ static int gcm_setkey(struct crypto_aead
8604  #endif
8605  
8606         memcpy(ctx->key, key, keylen);
8607 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8608 -                                     DMA_TO_DEVICE);
8609 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8610 -               dev_err(jrdev, "unable to map key i/o memory\n");
8611 -               return -ENOMEM;
8612 -       }
8613 -       ctx->enckeylen = keylen;
8614 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8615 +       ctx->cdata.keylen = keylen;
8616  
8617 -       ret = gcm_set_sh_desc(aead);
8618 -       if (ret) {
8619 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8620 -                                DMA_TO_DEVICE);
8621 -       }
8622 -
8623 -       return ret;
8624 +       return gcm_set_sh_desc(aead);
8625  }
8626  
8627  static int rfc4106_setkey(struct crypto_aead *aead,
8628 @@ -1430,7 +598,6 @@ static int rfc4106_setkey(struct crypto_
8629  {
8630         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8631         struct device *jrdev = ctx->jrdev;
8632 -       int ret = 0;
8633  
8634         if (keylen < 4)
8635                 return -EINVAL;
8636 @@ -1446,22 +613,10 @@ static int rfc4106_setkey(struct crypto_
8637          * The last four bytes of the key material are used as the salt value
8638          * in the nonce. Update the AES key length.
8639          */
8640 -       ctx->enckeylen = keylen - 4;
8641 -
8642 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8643 -                                     DMA_TO_DEVICE);
8644 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8645 -               dev_err(jrdev, "unable to map key i/o memory\n");
8646 -               return -ENOMEM;
8647 -       }
8648 -
8649 -       ret = rfc4106_set_sh_desc(aead);
8650 -       if (ret) {
8651 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8652 -                                DMA_TO_DEVICE);
8653 -       }
8654 -
8655 -       return ret;
8656 +       ctx->cdata.keylen = keylen - 4;
8657 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8658 +                                  DMA_TO_DEVICE);
8659 +       return rfc4106_set_sh_desc(aead);
8660  }
8661  
8662  static int rfc4543_setkey(struct crypto_aead *aead,
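
As the comments in the rfc4106/rfc4543 hunks around here state, the last four bytes of the supplied key material are the salt used in the nonce, so cdata.keylen is the supplied length minus 4 and only that AES portion participates in the inline-key budget and the DMA sync. A small standalone example of the split, with invented key bytes:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* invented key material: 16-byte AES-128 key followed by the salt */
        unsigned char key_material[16 + 4];
        unsigned int keylen = sizeof(key_material);
        unsigned char salt[4];

        memset(key_material, 0xAB, sizeof(key_material));
        memcpy(salt, key_material + keylen - 4, sizeof(salt));
        keylen -= 4; /* cdata.keylen counts only the AES portion */
        printf("AES keylen = %u, salt = %02x%02x%02x%02x\n",
               keylen, salt[0], salt[1], salt[2], salt[3]);
        return 0;
}
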
8663 @@ -1469,7 +624,6 @@ static int rfc4543_setkey(struct crypto_
8664  {
8665         struct caam_ctx *ctx = crypto_aead_ctx(aead);
8666         struct device *jrdev = ctx->jrdev;
8667 -       int ret = 0;
8668  
8669         if (keylen < 4)
8670                 return -EINVAL;
8671 @@ -1485,43 +639,28 @@ static int rfc4543_setkey(struct crypto_
8672          * The last four bytes of the key material are used as the salt value
8673          * in the nonce. Update the AES key length.
8674          */
8675 -       ctx->enckeylen = keylen - 4;
8676 -
8677 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8678 -                                     DMA_TO_DEVICE);
8679 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8680 -               dev_err(jrdev, "unable to map key i/o memory\n");
8681 -               return -ENOMEM;
8682 -       }
8683 -
8684 -       ret = rfc4543_set_sh_desc(aead);
8685 -       if (ret) {
8686 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8687 -                                DMA_TO_DEVICE);
8688 -       }
8689 -
8690 -       return ret;
8691 +       ctx->cdata.keylen = keylen - 4;
8692 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8693 +                                  DMA_TO_DEVICE);
8694 +       return rfc4543_set_sh_desc(aead);
8695  }
8696  
8697  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8698                              const u8 *key, unsigned int keylen)
8699  {
8700         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8701 -       struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8702         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8703         const char *alg_name = crypto_tfm_alg_name(tfm);
8704         struct device *jrdev = ctx->jrdev;
8705 -       int ret = 0;
8706 -       u32 *key_jump_cmd;
8707 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8708         u32 *desc;
8709 -       u8 *nonce;
8710 -       u32 geniv;
8711         u32 ctx1_iv_off = 0;
8712 -       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8713 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8714                                OP_ALG_AAI_CTR_MOD128);
8715         const bool is_rfc3686 = (ctr_mode &&
8716                                  (strstr(alg_name, "rfc3686") != NULL));
8717  
8718 +       memcpy(ctx->key, key, keylen);
8719  #ifdef DEBUG
8720         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8721                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8722 @@ -1544,215 +683,33 @@ static int ablkcipher_setkey(struct cryp
8723                 keylen -= CTR_RFC3686_NONCE_SIZE;
8724         }
8725  
8726 -       memcpy(ctx->key, key, keylen);
8727 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8728 -                                     DMA_TO_DEVICE);
8729 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
8730 -               dev_err(jrdev, "unable to map key i/o memory\n");
8731 -               return -ENOMEM;
8732 -       }
8733 -       ctx->enckeylen = keylen;
8734 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8735 +       ctx->cdata.keylen = keylen;
8736 +       ctx->cdata.key_virt = ctx->key;
8737 +       ctx->cdata.key_inline = true;
8738  
8739         /* ablkcipher_encrypt shared descriptor */
8740         desc = ctx->sh_desc_enc;
8741 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8742 -       /* Skip if already shared */
8743 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8744 -                                  JUMP_COND_SHRD);
8745 -
8746 -       /* Load class1 key only */
8747 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8748 -                         ctx->enckeylen, CLASS_1 |
8749 -                         KEY_DEST_CLASS_REG);
8750 -
8751 -       /* Load nonce into CONTEXT1 reg */
8752 -       if (is_rfc3686) {
8753 -               nonce = (u8 *)key + keylen;
8754 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8755 -                                  LDST_CLASS_IND_CCB |
8756 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8757 -               append_move(desc, MOVE_WAITCOMP |
8758 -                           MOVE_SRC_OUTFIFO |
8759 -                           MOVE_DEST_CLASS1CTX |
8760 -                           (16 << MOVE_OFFSET_SHIFT) |
8761 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8762 -       }
8763 -
8764 -       set_jump_tgt_here(desc, key_jump_cmd);
8765 -
8766 -       /* Load iv */
8767 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8768 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8769 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8770 +                                    ctx1_iv_off);
8771 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8772 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8773  
8774 -       /* Load counter into CONTEXT1 reg */
8775 -       if (is_rfc3686)
8776 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8777 -                                    LDST_SRCDST_BYTE_CONTEXT |
8778 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8779 -                                     LDST_OFFSET_SHIFT));
8780 -
8781 -       /* Load operation */
8782 -       append_operation(desc, ctx->class1_alg_type |
8783 -                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8784 -
8785 -       /* Perform operation */
8786 -       ablkcipher_append_src_dst(desc);
8787 -
8788 -       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8789 -                                             desc_bytes(desc),
8790 -                                             DMA_TO_DEVICE);
8791 -       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8792 -               dev_err(jrdev, "unable to map shared descriptor\n");
8793 -               return -ENOMEM;
8794 -       }
8795 -#ifdef DEBUG
8796 -       print_hex_dump(KERN_ERR,
8797 -                      "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8798 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
8799 -                      desc_bytes(desc), 1);
8800 -#endif
8801         /* ablkcipher_decrypt shared descriptor */
8802         desc = ctx->sh_desc_dec;
8803 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8804 +                                    ctx1_iv_off);
8805 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8806 +                                  desc_bytes(desc), DMA_TO_DEVICE);
8807  
8808 -       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8809 -       /* Skip if already shared */
8810 -       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8811 -                                  JUMP_COND_SHRD);
8812 -
8813 -       /* Load class1 key only */
8814 -       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8815 -                         ctx->enckeylen, CLASS_1 |
8816 -                         KEY_DEST_CLASS_REG);
8817 -
8818 -       /* Load nonce into CONTEXT1 reg */
8819 -       if (is_rfc3686) {
8820 -               nonce = (u8 *)key + keylen;
8821 -               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8822 -                                  LDST_CLASS_IND_CCB |
8823 -                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8824 -               append_move(desc, MOVE_WAITCOMP |
8825 -                           MOVE_SRC_OUTFIFO |
8826 -                           MOVE_DEST_CLASS1CTX |
8827 -                           (16 << MOVE_OFFSET_SHIFT) |
8828 -                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8829 -       }
8830 -
8831 -       set_jump_tgt_here(desc, key_jump_cmd);
8832 -
8833 -       /* load IV */
8834 -       append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8835 -                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8836 -
8837 -       /* Load counter into CONTEXT1 reg */
8838 -       if (is_rfc3686)
8839 -               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8840 -                                    LDST_SRCDST_BYTE_CONTEXT |
8841 -                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8842 -                                     LDST_OFFSET_SHIFT));
8843 -
8844 -       /* Choose operation */
8845 -       if (ctr_mode)
8846 -               append_operation(desc, ctx->class1_alg_type |
8847 -                                OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8848 -       else
8849 -               append_dec_op1(desc, ctx->class1_alg_type);
8850 -
8851 -       /* Perform operation */
8852 -       ablkcipher_append_src_dst(desc);
8853 -
8854 -       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8855 -                                             desc_bytes(desc),
8856 -                                             DMA_TO_DEVICE);
8857 -       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8858 -               dev_err(jrdev, "unable to map shared descriptor\n");
8859 -               return -ENOMEM;
8860 -       }
8861 -
8862 -#ifdef DEBUG
-       print_hex_dump(KERN_ERR,
-                      "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
-                      desc_bytes(desc), 1);
-#endif
        /* ablkcipher_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;
+       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+                                       ctx1_iv_off);
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
-       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-       /* Skip if already shared */
-       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-                                  JUMP_COND_SHRD);
-
-       /* Load class1 key only */
-       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-                         ctx->enckeylen, CLASS_1 |
-                         KEY_DEST_CLASS_REG);
-
-       /* Load Nonce into CONTEXT1 reg */
-       if (is_rfc3686) {
-               nonce = (u8 *)key + keylen;
-               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-                                  LDST_CLASS_IND_CCB |
-                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-               append_move(desc, MOVE_WAITCOMP |
-                           MOVE_SRC_OUTFIFO |
-                           MOVE_DEST_CLASS1CTX |
-                           (16 << MOVE_OFFSET_SHIFT) |
-                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-       }
-       set_jump_tgt_here(desc, key_jump_cmd);
-
-       /* Generate IV */
-       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
-               NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
-       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-       append_move(desc, MOVE_WAITCOMP |
-                   MOVE_SRC_INFIFO |
-                   MOVE_DEST_CLASS1CTX |
-                   (crt->ivsize << MOVE_LEN_SHIFT) |
-                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
-       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-       /* Copy generated IV to memory */
-       append_seq_store(desc, crt->ivsize,
-                        LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
-                        (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-       /* Load Counter into CONTEXT1 reg */
-       if (is_rfc3686)
-               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-                                    LDST_SRCDST_BYTE_CONTEXT |
-                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-                                     LDST_OFFSET_SHIFT));
-
-       if (ctx1_iv_off)
-               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
-                           (1 << JUMP_OFFSET_SHIFT));
-
-       /* Load operation */
-       append_operation(desc, ctx->class1_alg_type |
-                        OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-       /* Perform operation */
-       ablkcipher_append_src_dst(desc);
-
-       ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-                                                desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR,
-                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
-                      desc_bytes(desc), 1);
-#endif
-
-       return ret;
+       return 0;
 }
 
 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1760,8 +717,7 @@ static int xts_ablkcipher_setkey(struct
 {
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       u32 *key_jump_cmd, *desc;
-       __be64 sector_size = cpu_to_be64(512);
+       u32 *desc;
 
        if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(ablkcipher,
@@ -1771,126 +727,38 @@ static int xts_ablkcipher_setkey(struct
        }
 
        memcpy(ctx->key, key, keylen);
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
-       ctx->enckeylen = keylen;
+       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = ctx->key;
+       ctx->cdata.key_inline = true;
 
        /* xts_ablkcipher_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
-       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-       /* Skip if already shared */
-       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-                                  JUMP_COND_SHRD);
-
-       /* Load class1 keys only */
-       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-       /* Load sector size with index 40 bytes (0x28) */
-       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
-                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
-       append_data(desc, (void *)&sector_size, 8);
-
-       set_jump_tgt_here(desc, key_jump_cmd);
-
-       /*
-        * create sequence for loading the sector index
-        * Upper 8B of IV - will be used as sector index
-        * Lower 8B of IV - will be discarded
-        */
-       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
-       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-       /* Load operation */
-       append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
-                        OP_ALG_ENCRYPT);
-
-       /* Perform operation */
-       ablkcipher_append_src_dst(desc);
-
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR,
-                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /* xts_ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
-
-       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-       /* Skip if already shared */
-       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-                                  JUMP_COND_SHRD);
-
-       /* Load class1 key only */
-       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-       /* Load sector size with index 40 bytes (0x28) */
-       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
-                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
-       append_data(desc, (void *)&sector_size, 8);
-
-       set_jump_tgt_here(desc, key_jump_cmd);
-
-       /*
-        * create sequence for loading the sector index
-        * Upper 8B of IV - will be used as sector index
-        * Lower 8B of IV - will be discarded
-        */
-       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
-       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-       /* Load operation */
-       append_dec_op1(desc, ctx->class1_alg_type);
-
-       /* Perform operation */
-       ablkcipher_append_src_dst(desc);
-
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
-                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR,
-                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
 
 /*
  * aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct aead_edesc {
-       int assoc_nents;
        int src_nents;
        int dst_nents;
-       dma_addr_t iv_dma;
       int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
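The setkey conversions above all follow one pattern: inline descriptor construction is replaced by the cnstr_shdsc_*() helpers this patch adds in caamalg_desc.c, and the descriptor and key buffers are DMA-mapped once at tfm-init time, so a rekey only rebuilds the descriptor and syncs it out. A minimal sketch of that pattern, using only calls that appear in this patch (the wrapper function itself is illustrative, not part of the driver):

	static void example_xts_rebuild_shdescs(struct caam_ctx *ctx)
	{
		struct device *jrdev = ctx->jrdev;
		u32 *desc;

		/* build into the pre-mapped buffer ... */
		desc = ctx->sh_desc_enc;
		cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
		/* ... then push the CPU-side bytes to the device copy */
		dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
					   desc_bytes(desc), DMA_TO_DEVICE);

		desc = ctx->sh_desc_dec;
		cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
		dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
					   desc_bytes(desc), DMA_TO_DEVICE);
	}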
@@ -1899,12 +767,12 @@ struct aead_edesc {
 
 /*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct ablkcipher_edesc {
@@ -1924,10 +792,11 @@ static void caam_unmap(struct device *de
                       int sec4_sg_bytes)
 {
        if (dst != src) {
-               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
-               dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
+               if (src_nents)
+                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
-               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }
 
        if (iv_dma)
@@ -2021,8 +890,7 @@ static void ablkcipher_encrypt_done(stru
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-       edesc = (struct ablkcipher_edesc *)((char *)desc -
-                offsetof(struct ablkcipher_edesc, hw_desc));
+       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
 
        if (err)
                caam_jr_strstatus(jrdev, err);
@@ -2031,10 +899,10 @@ static void ablkcipher_encrypt_done(stru
        print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       edesc->src_nents > 1 ? 100 : ivsize, 1);
-       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif
+       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 
        ablkcipher_unmap(jrdev, edesc, req);
 
@@ -2062,8 +930,7 @@ static void ablkcipher_decrypt_done(stru
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-       edesc = (struct ablkcipher_edesc *)((char *)desc -
-                offsetof(struct ablkcipher_edesc, hw_desc));
+       edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);
 
@@ -2071,10 +938,10 @@ static void ablkcipher_decrypt_done(stru
        print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
-       dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif
+       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 
        ablkcipher_unmap(jrdev, edesc, req);
 
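In the completion callbacks above the driver recovers its software-side ablkcipher_edesc from the address of the hardware descriptor embedded inside it. container_of() is the idiomatic replacement for the old offsetof() arithmetic: both compute the same address, but container_of() also type-checks the member. An illustrative (non-driver) helper showing the equivalence:

	static struct ablkcipher_edesc *example_desc_to_edesc(u32 *desc)
	{
		/* old style: back up by the member offset by hand */
		struct ablkcipher_edesc *a = (struct ablkcipher_edesc *)
			((char *)desc - offsetof(struct ablkcipher_edesc, hw_desc));
		/* new style: same address, expressed through container_of() */
		struct ablkcipher_edesc *b =
			container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

		BUG_ON(a != b);		/* always equal */
		return b;
	}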
@@ -2114,7 +981,7 @@ static void init_aead_job(struct aead_re
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
        if (all_contig) {
-               src_dma = sg_dma_address(req->src);
+               src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
@@ -2129,7 +996,7 @@ static void init_aead_job(struct aead_re
        out_options = in_options;
 
        if (unlikely(req->src != req->dst)) {
-               if (!edesc->dst_nents) {
+               if (edesc->dst_nents == 1) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
@@ -2147,9 +1014,6 @@ static void init_aead_job(struct aead_re
                append_seq_out_ptr(desc, dst_dma,
                                   req->assoclen + req->cryptlen - authsize,
                                   out_options);
-
-       /* REG3 = assoclen */
-       append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
 }
 
 static void init_gcm_job(struct aead_request *req,
@@ -2164,6 +1028,7 @@ static void init_gcm_job(struct aead_req
        unsigned int last;
 
        init_aead_job(req, edesc, all_contig, encrypt);
+       append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
 
        /* BUG This should not be specific to generic GCM. */
        last = 0;
@@ -2175,7 +1040,7 @@ static void init_gcm_job(struct aead_req
                         FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
        /* Append Salt */
        if (!generic_gcm)
-               append_data(desc, ctx->key + ctx->enckeylen, 4);
+               append_data(desc, ctx->key + ctx->cdata.keylen, 4);
        /* Append IV */
        append_data(desc, req->iv, ivsize);
        /* End of blank commands */
@@ -2190,7 +1055,8 @@ static void init_authenc_job(struct aead
                                                 struct caam_aead_alg, aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;
        u32 *desc = edesc->hw_desc;
@@ -2213,6 +1079,15 @@ static void init_authenc_job(struct aead
 
        init_aead_job(req, edesc, all_contig, encrypt);
 
+       /*
+        * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+        * having DPOVRD as destination.
+        */
+       if (ctrlpriv->era < 3)
+               append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+       else
+               append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
        if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
                append_load_as_imm(desc, req->iv, ivsize,
                                   LDST_CLASS_1_CCB |
@@ -2236,16 +1111,15 @@ static void init_ablkcipher_job(u32 *sh_
        int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
        print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
-       printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
-       dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+       pr_err("asked=%d, nbytes=%d\n",
+              (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
 #endif
+       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
+                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
 
        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -2261,7 +1135,7 @@ static void init_ablkcipher_job(u32 *sh_
        append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
 
        if (likely(req->src == req->dst)) {
-               if (!edesc->src_nents && iv_contig) {
+               if (edesc->src_nents == 1 && iv_contig) {
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
@@ -2269,7 +1143,7 @@ static void init_ablkcipher_job(u32 *sh_
                        out_options = LDST_SGF;
                }
        } else {
-               if (!edesc->dst_nents) {
+               if (edesc->dst_nents == 1) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
@@ -2296,20 +1170,18 @@ static void init_ablkcipher_giv_job(u32
        int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
        print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
-       dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
-                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                   edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
 #endif
+       caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+                    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
 
        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-       if (!edesc->src_nents) {
+       if (edesc->src_nents == 1) {
                src_dma = sg_dma_address(req->src);
                in_options = 0;
        } else {
@@ -2340,87 +1212,100 @@ static struct aead_edesc *aead_edesc_all
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
-       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       int src_nents, dst_nents = 0;
+       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct aead_edesc *edesc;
-       int sgc;
-       bool all_contig = true;
-       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+       int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
        unsigned int authsize = ctx->authsize;
 
        if (unlikely(req->dst != req->src)) {
-               src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
-               dst_nents = sg_count(req->dst,
-                                    req->assoclen + req->cryptlen +
-                                       (encrypt ? authsize : (-authsize)));
-       } else {
-               src_nents = sg_count(req->src,
-                                    req->assoclen + req->cryptlen +
-                                       (encrypt ? authsize : 0));
-       }
-
-       /* Check if data are contiguous. */
-       all_contig = !src_nents;
-       if (!all_contig) {
-               src_nents = src_nents ? : 1;
-               sec4_sg_len = src_nents;
-       }
-
-       sec4_sg_len += dst_nents;
-
-       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+               src_nents = sg_nents_for_len(req->src, req->assoclen +
+                                            req->cryptlen);
+               if (unlikely(src_nents < 0)) {
+                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+                               req->assoclen + req->cryptlen);
+                       return ERR_PTR(src_nents);
+               }
 
-       /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
-                       GFP_DMA | flags);
-       if (!edesc) {
-               dev_err(jrdev, "could not allocate extended descriptor\n");
-               return ERR_PTR(-ENOMEM);
+               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+                                            req->cryptlen +
+                                               (encrypt ? authsize :
+                                                          (-authsize)));
+               if (unlikely(dst_nents < 0)) {
+                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+                               req->assoclen + req->cryptlen +
+                               (encrypt ? authsize : (-authsize)));
+                       return ERR_PTR(dst_nents);
+               }
+       } else {
+               src_nents = sg_nents_for_len(req->src, req->assoclen +
+                                            req->cryptlen +
+                                            (encrypt ? authsize : 0));
+               if (unlikely(src_nents < 0)) {
+                       dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+                               req->assoclen + req->cryptlen +
+                               (encrypt ? authsize : 0));
+                       return ERR_PTR(src_nents);
+               }
        }
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_BIDIRECTIONAL);
-               if (unlikely(!sgc)) {
+               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+                                             DMA_BIDIRECTIONAL);
+               if (unlikely(!mapped_src_nents)) {
                        dev_err(jrdev, "unable to map source\n");
-                       kfree(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_TO_DEVICE);
-               if (unlikely(!sgc)) {
-                       dev_err(jrdev, "unable to map source\n");
-                       kfree(edesc);
-                       return ERR_PTR(-ENOMEM);
+               /* Cover also the case of null (zero length) input data */
+               if (src_nents) {
+                       mapped_src_nents = dma_map_sg(jrdev, req->src,
+                                                     src_nents, DMA_TO_DEVICE);
+                       if (unlikely(!mapped_src_nents)) {
+                               dev_err(jrdev, "unable to map source\n");
+                               return ERR_PTR(-ENOMEM);
+                       }
+               } else {
+                       mapped_src_nents = 0;
                }
 
-               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                DMA_FROM_DEVICE);
-               if (unlikely(!sgc)) {
+               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+                                             DMA_FROM_DEVICE);
+               if (unlikely(!mapped_dst_nents)) {
                        dev_err(jrdev, "unable to map destination\n");
-                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
-                                    DMA_TO_DEVICE);
-                       kfree(edesc);
+                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return ERR_PTR(-ENOMEM);
                }
        }
 
+       sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+       /* allocate space for base edesc and hw desc commands, link tables */
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
-       *all_contig_ptr = all_contig;
+       *all_contig_ptr = !(mapped_src_nents > 1);
 
        sec4_sg_index = 0;
-       if (!all_contig) {
-               sg_to_sec4_sg_last(req->src, src_nents,
-                             edesc->sec4_sg + sec4_sg_index, 0);
-               sec4_sg_index += src_nents;
+       if (mapped_src_nents > 1) {
+               sg_to_sec4_sg_last(req->src, mapped_src_nents,
+                                  edesc->sec4_sg + sec4_sg_index, 0);
+               sec4_sg_index += mapped_src_nents;
        }
-       if (dst_nents) {
-               sg_to_sec4_sg_last(req->dst, dst_nents,
+       if (mapped_dst_nents > 1) {
+               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
 
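The reworked allocation above replaces the old sg_count() heuristic with exact, error-propagating segment counting, and sizes the sec4 link table from the *mapped* entry count, since dma_map_sg() may coalesce entries behind an IOMMU. A condensed sketch of the source-side sequence (the helper name is illustrative; the calls are those used in the patch):

	static int example_count_and_map_src(struct device *jrdev,
					     struct scatterlist *src, int len)
	{
		int src_nents, mapped_src_nents;

		src_nents = sg_nents_for_len(src, len);	/* < 0 if list too short */
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", len);
			return src_nents;
		}

		mapped_src_nents = dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return -ENOMEM;
		}

		/* only a multi-segment mapping needs sec4 link-table entries */
		return mapped_src_nents > 1 ? mapped_src_nents : 0;
	}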
@@ -2573,13 +1458,9 @@ static int aead_decrypt(struct aead_requ
        u32 *desc;
        int ret = 0;
 
-#ifdef DEBUG
-       bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                             CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
-       dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
-                   DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                   req->assoclen + req->cryptlen, 1, may_sleep);
-#endif
+       caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+                    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+                    req->assoclen + req->cryptlen, 1);
 
        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
@@ -2619,51 +1500,80 @@ static struct ablkcipher_edesc *ablkciph
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
-       int src_nents, dst_nents = 0, sec4_sg_bytes;
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct ablkcipher_edesc *edesc;
        dma_addr_t iv_dma = 0;
-       bool iv_contig = false;
-       int sgc;
+       bool in_contig;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       int sec4_sg_index;
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
-       src_nents = sg_count(req->src, req->nbytes);
+       src_nents = sg_nents_for_len(req->src, req->nbytes);
+       if (unlikely(src_nents < 0)) {
+               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+                       req->nbytes);
+               return ERR_PTR(src_nents);
+       }
 
-       if (req->dst != req->src)
-               dst_nents = sg_count(req->dst, req->nbytes);
+       if (req->dst != req->src) {
+               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+               if (unlikely(dst_nents < 0)) {
+                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+                               req->nbytes);
+                       return ERR_PTR(dst_nents);
+               }
+       }
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_BIDIRECTIONAL);
+               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+                                             DMA_BIDIRECTIONAL);
+               if (unlikely(!mapped_src_nents)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
        } else {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_TO_DEVICE);
-               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                DMA_FROM_DEVICE);
+               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+                                             DMA_TO_DEVICE);
+               if (unlikely(!mapped_src_nents)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+                                             DMA_FROM_DEVICE);
+               if (unlikely(!mapped_dst_nents)) {
+                       dev_err(jrdev, "unable to map destination\n");
+                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
-       /*
-        * Check if iv can be contiguous with source and destination.
-        * If so, include it. If not, create scatterlist.
-        */
-       if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
-               iv_contig = true;
-       else
-               src_nents = src_nents ? : 1;
-       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-                       sizeof(struct sec4_sg_entry);
+       if (mapped_src_nents == 1 &&
+           iv_dma + ivsize == sg_dma_address(req->src)) {
+               in_contig = true;
+               sec4_sg_ents = 0;
+       } else {
+               in_contig = false;
+               sec4_sg_ents = 1 + mapped_src_nents;
+       }
+       dst_sg_idx = sec4_sg_ents;
+       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2673,23 +1583,24 @@ static struct ablkcipher_edesc *ablkciph
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
 
-       sec4_sg_index = 0;
-       if (!iv_contig) {
+       if (!in_contig) {
                dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-               sg_to_sec4_sg_last(req->src, src_nents,
+               sg_to_sec4_sg_last(req->src, mapped_src_nents,
                                   edesc->sec4_sg + 1, 0);
-               sec4_sg_index += 1 + src_nents;
        }
 
-       if (dst_nents) {
-               sg_to_sec4_sg_last(req->dst, dst_nents,
-                       edesc->sec4_sg + sec4_sg_index, 0);
+       if (mapped_dst_nents > 1) {
+               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+                                  edesc->sec4_sg + dst_sg_idx, 0);
        }
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
+               kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2701,7 +1612,7 @@ static struct ablkcipher_edesc *ablkciph
                       sec4_sg_bytes, 1);
 #endif
 
-       *iv_contig_out = iv_contig;
+       *iv_contig_out = in_contig;
        return edesc;
 }
 
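Note the single-allocation layout used throughout these paths: the software edesc, the hardware job descriptor and the sec4 link table share one GFP_DMA buffer, and edesc->sec4_sg simply points into its tail. Roughly (field names as in the driver, sizes per request):

	/*
	 * +---------------------------+  <- edesc = kzalloc(..., GFP_DMA | flags)
	 * | struct ablkcipher_edesc   |
	 * +---------------------------+  <- edesc->hw_desc (h/w job descriptor)
	 * | desc_bytes                |
	 * +---------------------------+  <- edesc->sec4_sg (h/w link table)
	 * | sec4_sg_bytes             |
	 * +---------------------------+
	 */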
@@ -2792,30 +1703,54 @@ static struct ablkcipher_edesc *ablkciph
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                         CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
-       int src_nents, dst_nents = 0, sec4_sg_bytes;
+       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
        struct ablkcipher_edesc *edesc;
        dma_addr_t iv_dma = 0;
-       bool iv_contig = false;
-       int sgc;
+       bool out_contig;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       int sec4_sg_index;
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
-       src_nents = sg_count(req->src, req->nbytes);
-
-       if (unlikely(req->dst != req->src))
-               dst_nents = sg_count(req->dst, req->nbytes);
+       src_nents = sg_nents_for_len(req->src, req->nbytes);
+       if (unlikely(src_nents < 0)) {
+               dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+                       req->nbytes);
+               return ERR_PTR(src_nents);
+       }
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_BIDIRECTIONAL);
+               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+                                             DMA_BIDIRECTIONAL);
+               if (unlikely(!mapped_src_nents)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               dst_nents = src_nents;
+               mapped_dst_nents = src_nents;
        } else {
-               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                DMA_TO_DEVICE);
-               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                DMA_FROM_DEVICE);
+               mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+                                             DMA_TO_DEVICE);
+               if (unlikely(!mapped_src_nents)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+               if (unlikely(dst_nents < 0)) {
+                       dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+                               req->nbytes);
+                       return ERR_PTR(dst_nents);
+               }
+
+               mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+                                             DMA_FROM_DEVICE);
+               if (unlikely(!mapped_dst_nents)) {
+                       dev_err(jrdev, "unable to map destination\n");
+                       dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
@@ -2825,21 +1760,29 @@ static struct ablkcipher_edesc *ablkciph
        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
-       if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
-               iv_contig = true;
-       else
-               dst_nents = dst_nents ? : 1;
-       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-                       sizeof(struct sec4_sg_entry);
+       sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       dst_sg_idx = sec4_sg_ents;
+       if (mapped_dst_nents == 1 &&
+           iv_dma + ivsize == sg_dma_address(req->dst)) {
+               out_contig = true;
+       } else {
+               out_contig = false;
+               sec4_sg_ents += 1 + mapped_dst_nents;
+       }
 
        /* allocate space for base edesc and hw desc commands, link tables */
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
        edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2849,24 +1792,24 @@ static struct ablkcipher_edesc *ablkciph
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
 
-       sec4_sg_index = 0;
-       if (src_nents) {
-               sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
-               sec4_sg_index += src_nents;
-       }
+       if (mapped_src_nents > 1)
+               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+                                  0);
 
-       if (!iv_contig) {
-               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+       if (!out_contig) {
+               dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
                                   iv_dma, ivsize, 0);
-               sec4_sg_index += 1;
-               sg_to_sec4_sg_last(req->dst, dst_nents,
-                                  edesc->sec4_sg + sec4_sg_index, 0);
+               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+                                  edesc->sec4_sg + dst_sg_idx + 1, 0);
        }
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
+               kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
        edesc->iv_dma = iv_dma;
@@ -2878,7 +1821,7 @@ static struct ablkcipher_edesc *ablkciph
                       sec4_sg_bytes, 1);
 #endif
 
-       *iv_contig_out = iv_contig;
+       *iv_contig_out = out_contig;
        return edesc;
 }
 
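For givencrypt the same contiguity optimization is applied on the output side: when the DMA-mapped IV happens to sit immediately before a single-segment destination, the generated IV and the ciphertext can be covered by one SEQ OUT pointer and the link table shrinks accordingly. An illustrative restatement of the test above:

	/* IV buffer physically adjacent to a single-segment destination? */
	bool out_contig = (mapped_dst_nents == 1 &&
			   iv_dma + ivsize == sg_dma_address(req->dst));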
@@ -2889,7 +1832,7 @@ static int ablkcipher_givencrypt(struct
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       bool iv_contig;
+       bool iv_contig = false;
        u32 *desc;
        int ret = 0;
 
@@ -2933,7 +1876,6 @@ struct caam_alg_template {
        } template_u;
        u32 class1_alg_type;
        u32 class2_alg_type;
-       u32 alg_op;
 };
 
 static struct caam_alg_template driver_algs[] = {
@@ -3118,7 +2060,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3140,7 +2081,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3162,7 +2102,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3184,7 +2123,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3206,7 +2144,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3228,7 +2165,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3250,7 +2186,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3273,7 +2208,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3296,7 +2230,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3319,7 +2252,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3342,7 +2274,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3365,7 +2296,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3388,7 +2318,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3411,7 +2340,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3434,7 +2362,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3457,7 +2384,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3480,7 +2406,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3503,7 +2428,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3526,7 +2450,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                }
        },
        {
@@ -3549,7 +2472,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                }
        },
@@ -3573,7 +2495,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3597,7 +2518,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3621,7 +2541,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3645,7 +2564,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3669,7 +2587,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3693,7 +2610,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3717,7 +2633,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3741,7 +2656,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3765,7 +2679,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3789,7 +2702,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3812,7 +2724,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3835,7 +2746,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3858,7 +2768,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3881,7 +2790,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3904,7 +2812,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3927,7 +2834,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3950,7 +2856,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -3973,7 +2878,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -3996,7 +2900,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                },
        },
        {
@@ -4019,7 +2922,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
-                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
                        .geniv = true,
                },
        },
@@ -4042,7 +2944,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
10130 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10131                 },
10132         },
10133         {
10134 @@ -4065,7 +2966,6 @@ static struct caam_aead_alg driver_aeads
10135                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10136                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10137                                            OP_ALG_AAI_HMAC_PRECOMP,
10138 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10139                         .geniv = true,
10140                 },
10141         },
10142 @@ -4090,7 +2990,6 @@ static struct caam_aead_alg driver_aeads
10143                                            OP_ALG_AAI_CTR_MOD128,
10144                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10145                                            OP_ALG_AAI_HMAC_PRECOMP,
10146 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10147                         .rfc3686 = true,
10148                 },
10149         },
10150 @@ -4115,7 +3014,6 @@ static struct caam_aead_alg driver_aeads
10151                                            OP_ALG_AAI_CTR_MOD128,
10152                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10153                                            OP_ALG_AAI_HMAC_PRECOMP,
10154 -                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10155                         .rfc3686 = true,
10156                         .geniv = true,
10157                 },
10158 @@ -4141,7 +3039,6 @@ static struct caam_aead_alg driver_aeads
10159                                            OP_ALG_AAI_CTR_MOD128,
10160                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10161                                            OP_ALG_AAI_HMAC_PRECOMP,
10162 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10163                         .rfc3686 = true,
10164                 },
10165         },
10166 @@ -4166,7 +3063,6 @@ static struct caam_aead_alg driver_aeads
10167                                            OP_ALG_AAI_CTR_MOD128,
10168                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10169                                            OP_ALG_AAI_HMAC_PRECOMP,
10170 -                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10171                         .rfc3686 = true,
10172                         .geniv = true,
10173                 },
10174 @@ -4192,7 +3088,6 @@ static struct caam_aead_alg driver_aeads
10175                                            OP_ALG_AAI_CTR_MOD128,
10176                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10177                                            OP_ALG_AAI_HMAC_PRECOMP,
10178 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10179                         .rfc3686 = true,
10180                 },
10181         },
10182 @@ -4217,7 +3112,6 @@ static struct caam_aead_alg driver_aeads
10183                                            OP_ALG_AAI_CTR_MOD128,
10184                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10185                                            OP_ALG_AAI_HMAC_PRECOMP,
10186 -                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10187                         .rfc3686 = true,
10188                         .geniv = true,
10189                 },
10190 @@ -4243,7 +3137,6 @@ static struct caam_aead_alg driver_aeads
10191                                            OP_ALG_AAI_CTR_MOD128,
10192                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10193                                            OP_ALG_AAI_HMAC_PRECOMP,
10194 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10195                         .rfc3686 = true,
10196                 },
10197         },
10198 @@ -4268,7 +3161,6 @@ static struct caam_aead_alg driver_aeads
10199                                            OP_ALG_AAI_CTR_MOD128,
10200                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10201                                            OP_ALG_AAI_HMAC_PRECOMP,
10202 -                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10203                         .rfc3686 = true,
10204                         .geniv = true,
10205                 },
10206 @@ -4294,7 +3186,6 @@ static struct caam_aead_alg driver_aeads
10207                                            OP_ALG_AAI_CTR_MOD128,
10208                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10209                                            OP_ALG_AAI_HMAC_PRECOMP,
10210 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10211                         .rfc3686 = true,
10212                 },
10213         },
10214 @@ -4319,7 +3210,6 @@ static struct caam_aead_alg driver_aeads
10215                                            OP_ALG_AAI_CTR_MOD128,
10216                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10217                                            OP_ALG_AAI_HMAC_PRECOMP,
10218 -                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10219                         .rfc3686 = true,
10220                         .geniv = true,
10221                 },
10222 @@ -4345,7 +3235,6 @@ static struct caam_aead_alg driver_aeads
10223                                            OP_ALG_AAI_CTR_MOD128,
10224                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10225                                            OP_ALG_AAI_HMAC_PRECOMP,
10226 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10227                         .rfc3686 = true,
10228                 },
10229         },
10230 @@ -4370,7 +3259,6 @@ static struct caam_aead_alg driver_aeads
10231                                            OP_ALG_AAI_CTR_MOD128,
10232                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10233                                            OP_ALG_AAI_HMAC_PRECOMP,
10234 -                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10235                         .rfc3686 = true,
10236                         .geniv = true,
10237                 },
10238 @@ -4385,16 +3273,34 @@ struct caam_crypto_alg {
10239  
10240  static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10241  {
10242 +       dma_addr_t dma_addr;
10243 +
10244         ctx->jrdev = caam_jr_alloc();
10245         if (IS_ERR(ctx->jrdev)) {
10246                 pr_err("Job Ring Device allocation for transform failed\n");
10247                 return PTR_ERR(ctx->jrdev);
10248         }
10249  
10250 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10251 +                                       offsetof(struct caam_ctx,
10252 +                                                sh_desc_enc_dma),
10253 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10254 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10255 +               dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10256 +               caam_jr_free(ctx->jrdev);
10257 +               return -ENOMEM;
10258 +       }
10259 +
10260 +       ctx->sh_desc_enc_dma = dma_addr;
10261 +       ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10262 +                                                  sh_desc_dec);
10263 +       ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10264 +                                                     sh_desc_givenc);
10265 +       ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10266 +
10267         /* copy descriptor header template value */
10268 -       ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10269 -       ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10270 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10271 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10272 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10273  
10274         return 0;
10275  }
10276 @@ -4421,25 +3327,9 @@ static int caam_aead_init(struct crypto_
10277  
10278  static void caam_exit_common(struct caam_ctx *ctx)
10279  {
10280 -       if (ctx->sh_desc_enc_dma &&
10281 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10282 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10283 -                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10284 -       if (ctx->sh_desc_dec_dma &&
10285 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10286 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10287 -                                desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10288 -       if (ctx->sh_desc_givenc_dma &&
10289 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10290 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10291 -                                desc_bytes(ctx->sh_desc_givenc),
10292 -                                DMA_TO_DEVICE);
10293 -       if (ctx->key_dma &&
10294 -           !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10295 -               dma_unmap_single(ctx->jrdev, ctx->key_dma,
10296 -                                ctx->enckeylen + ctx->split_key_pad_len,
10297 -                                DMA_TO_DEVICE);
10298 -
10299 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10300 +                              offsetof(struct caam_ctx, sh_desc_enc_dma),
10301 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10302         caam_jr_free(ctx->jrdev);
10303  }
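The two hunks above replace four per-field DMA mappings with a single one: struct caam_ctx lays out sh_desc_enc, sh_desc_dec, sh_desc_givenc and key contiguously, so one dma_map_single_attrs() call covering everything up to the sh_desc_enc_dma member suffices, and the remaining bus addresses fall out of offsetof() deltas. A minimal userspace sketch of that arithmetic, with mock_ctx standing in for the real struct caam_ctx layout and an invented base address:

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for the relevant part of struct caam_ctx */
struct mock_ctx {
	unsigned int  sh_desc_enc[16];
	unsigned int  sh_desc_dec[16];
	unsigned int  sh_desc_givenc[16];
	unsigned char key[64];
	unsigned long sh_desc_enc_dma;	/* first member NOT covered by the map */
};

int main(void)
{
	/* pretend result of dma_map_single_attrs() for &ctx->sh_desc_enc */
	unsigned long dma_addr = 0x80000000ul;

	printf("mapped bytes : %zu\n", offsetof(struct mock_ctx, sh_desc_enc_dma));
	printf("dec handle   : 0x%lx\n", dma_addr + offsetof(struct mock_ctx, sh_desc_dec));
	printf("givenc handle: 0x%lx\n", dma_addr + offsetof(struct mock_ctx, sh_desc_givenc));
	printf("key handle   : 0x%lx\n", dma_addr + offsetof(struct mock_ctx, key));
	/* teardown mirrors this: one dma_unmap_single_attrs() over the same span */
	return 0;
}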
10304  
10305 @@ -4515,7 +3405,6 @@ static struct caam_crypto_alg *caam_alg_
10306  
10307         t_alg->caam.class1_alg_type = template->class1_alg_type;
10308         t_alg->caam.class2_alg_type = template->class2_alg_type;
10309 -       t_alg->caam.alg_op = template->alg_op;
10310  
10311         return t_alg;
10312  }
10313 --- /dev/null
10314 +++ b/drivers/crypto/caam/caamalg_desc.c
10315 @@ -0,0 +1,1961 @@
10316 +/*
10317 + * Shared descriptors for aead, ablkcipher algorithms
10318 + *
10319 + * Copyright 2016 NXP
10320 + */
10321 +
10322 +#include "compat.h"
10323 +#include "desc_constr.h"
10324 +#include "caamalg_desc.h"
10325 +
10326 +/*
10327 + * For aead functions, read the payload from req->src and write the
10328 + * processed payload to req->dst
10329 + */
10330 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10331 +{
10332 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10333 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10334 +                            KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10335 +}
10336 +
10337 +/* Set DK bit in class 1 operation if shared */
10338 +static inline void append_dec_op1(u32 *desc, u32 type)
10339 +{
10340 +       u32 *jump_cmd, *uncond_jump_cmd;
10341 +
10342 +       /* DK bit is valid only for AES */
10343 +       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10344 +               append_operation(desc, type | OP_ALG_AS_INITFINAL |
10345 +                                OP_ALG_DECRYPT);
10346 +               return;
10347 +       }
10348 +
10349 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10350 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10351 +                        OP_ALG_DECRYPT);
10352 +       uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10353 +       set_jump_tgt_here(desc, jump_cmd);
10354 +       append_operation(desc, type | OP_ALG_AS_INITFINAL |
10355 +                        OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10356 +       set_jump_tgt_here(desc, uncond_jump_cmd);
10357 +}
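append_dec_op1() above shows the forward-jump idiom used throughout these constructors: append_jump() emits a JUMP command whose target offset is not yet known, and set_jump_tgt_here() back-patches it once construction reaches the target. A toy re-implementation of the idiom over a plain word buffer, with invented command encodings:

#include <stdint.h>
#include <stdio.h>

static uint32_t desc[16];
static unsigned int desc_len;

static uint32_t *append(uint32_t cmd)		/* cf. append_operation()/append_jump() */
{
	desc[desc_len] = cmd;
	return &desc[desc_len++];
}

static void set_jump_tgt(uint32_t *jump_cmd)	/* cf. set_jump_tgt_here() */
{
	/* patch the relative offset into the low byte of the JUMP word */
	*jump_cmd |= (uint32_t)(&desc[desc_len] - jump_cmd) & 0xff;
}

int main(void)
{
	uint32_t *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append(0xa0000000);		/* jump if shared, offset tbd */
	append(0x82100d00);			/* plain DECRYPT operation */
	uncond_jump_cmd = append(0xa0000000);	/* skip the DK path, offset tbd */
	set_jump_tgt(jump_cmd);			/* the shared path lands here */
	append(0x82100d04);			/* DECRYPT with the DK bit set */
	set_jump_tgt(uncond_jump_cmd);

	printf("conditional jump:   0x%08x\n", desc[0]);
	printf("unconditional jump: 0x%08x\n", desc[2]);
	return 0;
}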
10358 +
10359 +/**
10360 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10361 + *                               (non-protocol) with no (null) encryption.
10362 + * @desc: pointer to buffer used for descriptor construction
10363 + * @adata: pointer to authentication transform definitions.
10364 + *         A split key is required for SEC Era < 6; the size of the split key
10365 + *         is specified in this case. Valid algorithm values - one of
10366 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
10367 + *         with OP_ALG_AAI_HMAC_PRECOMP.
10368 + * @icvsize: integrity check value (ICV) size (truncated or full)
10369 + * @era: SEC Era
10370 + */
10371 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10372 +                                unsigned int icvsize, int era)
10373 +{
10374 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10375 +
10376 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10377 +
10378 +       /* Skip if already shared */
10379 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10380 +                                  JUMP_COND_SHRD);
10381 +       if (era < 6) {
10382 +               if (adata->key_inline)
10383 +                       append_key_as_imm(desc, adata->key_virt,
10384 +                                         adata->keylen_pad, adata->keylen,
10385 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
10386 +                                         KEY_ENC);
10387 +               else
10388 +                       append_key(desc, adata->key_dma, adata->keylen,
10389 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10390 +       } else {
10391 +               append_proto_dkp(desc, adata);
10392 +       }
10393 +       set_jump_tgt_here(desc, key_jump_cmd);
10394 +
10395 +       /* assoclen + cryptlen = seqinlen */
10396 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10397 +
10398 +       /* Prepare to read and write cryptlen + assoclen bytes */
10399 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10400 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10401 +
10402 +       /*
10403 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10404 +        * thus we need to self-patch the descriptor buffer at
10405 +        * run time.
10406 +        */
10407 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10408 +                                   MOVE_DEST_MATH3 |
10409 +                                   (0x6 << MOVE_LEN_SHIFT));
10410 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10411 +                                    MOVE_DEST_DESCBUF |
10412 +                                    MOVE_WAITCOMP |
10413 +                                    (0x8 << MOVE_LEN_SHIFT));
10414 +
10415 +       /* Class 2 operation */
10416 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10417 +                        OP_ALG_ENCRYPT);
10418 +
10419 +       /* Read and write cryptlen bytes */
10420 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10421 +
10422 +       set_move_tgt_here(desc, read_move_cmd);
10423 +       set_move_tgt_here(desc, write_move_cmd);
10424 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10425 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10426 +                   MOVE_AUX_LS);
10427 +
10428 +       /* Write ICV */
10429 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10430 +                        LDST_SRCDST_BYTE_CONTEXT);
10431 +
10432 +#ifdef DEBUG
10433 +       print_hex_dump(KERN_ERR,
10434 +                      "aead null enc shdesc@" __stringify(__LINE__)": ",
10435 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10436 +#endif
10437 +}
10438 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
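The read_move_cmd/write_move_cmd pair above is a second patching idiom: on SEC revisions without a variable-length MOVE, the descriptor lifts one of its own command words into a MATH register, lets the engine supply the run-time length, and writes the word back before it executes. A rough plain-C analogue of that sequence, with an invented descriptor layout and values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* toy descriptor: word 2 is a MOVE whose length field is still zero */
	uint32_t desc[4] = { 0xb8000000, 0x00000000, 0x78000000, 0x00000000 };
	uint32_t math3;

	math3 = desc[2];	/* "read move": command word -> MATH3 */
	math3 |= 48;		/* the engine fills in the variable length */
	desc[2] = math3;	/* "write move": patch it back in place */

	printf("patched MOVE word: 0x%08x\n", desc[2]);
	return 0;
}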
10439 +
10440 +/**
10441 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10442 + *                               (non-protocol) with no (null) decryption.
10443 + * @desc: pointer to buffer used for descriptor construction
10444 + * @adata: pointer to authentication transform definitions.
10445 + *         A split key is required for SEC Era < 6; the size of the split key
10446 + *         is specified in this case. Valid algorithm values - one of
10447 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
10448 + *         with OP_ALG_AAI_HMAC_PRECOMP.
10449 + * @icvsize: integrity check value (ICV) size (truncated or full)
10450 + * @era: SEC Era
10451 + */
10452 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10453 +                                unsigned int icvsize, int era)
10454 +{
10455 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10456 +
10457 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
10458 +
10459 +       /* Skip if already shared */
10460 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10461 +                                  JUMP_COND_SHRD);
10462 +       if (era < 6) {
10463 +               if (adata->key_inline)
10464 +                       append_key_as_imm(desc, adata->key_virt,
10465 +                                         adata->keylen_pad, adata->keylen,
10466 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
10467 +                                         KEY_ENC);
10468 +               else
10469 +                       append_key(desc, adata->key_dma, adata->keylen,
10470 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10471 +       } else {
10472 +               append_proto_dkp(desc, adata);
10473 +       }
10474 +       set_jump_tgt_here(desc, key_jump_cmd);
10475 +
10476 +       /* Class 2 operation */
10477 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10478 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10479 +
10480 +       /* assoclen + cryptlen = seqoutlen */
10481 +       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10482 +
10483 +       /* Prepare to read and write cryptlen + assoclen bytes */
10484 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10485 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10486 +
10487 +       /*
10488 +        * MOVE_LEN opcode is not available in all SEC HW revisions,
10489 +        * thus we need to self-patch the descriptor buffer at
10490 +        * run time.
10491 +        */
10492 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10493 +                                   MOVE_DEST_MATH2 |
10494 +                                   (0x6 << MOVE_LEN_SHIFT));
10495 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10496 +                                    MOVE_DEST_DESCBUF |
10497 +                                    MOVE_WAITCOMP |
10498 +                                    (0x8 << MOVE_LEN_SHIFT));
10499 +
10500 +       /* Read and write cryptlen bytes */
10501 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10502 +
10503 +       /*
10504 +        * Insert a NOP here, since we need at least 4 instructions between
10505 +        * code patching the descriptor buffer and the location being patched.
10506 +        */
10507 +       jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10508 +       set_jump_tgt_here(desc, jump_cmd);
10509 +
10510 +       set_move_tgt_here(desc, read_move_cmd);
10511 +       set_move_tgt_here(desc, write_move_cmd);
10512 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10513 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10514 +                   MOVE_AUX_LS);
10515 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10516 +
10517 +       /* Load ICV */
10518 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10519 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10520 +
10521 +#ifdef DEBUG
10522 +       print_hex_dump(KERN_ERR,
10523 +                      "aead null dec shdesc@" __stringify(__LINE__)": ",
10524 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10525 +#endif
10526 +}
10527 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10528 +
10529 +static void init_sh_desc_key_aead(u32 * const desc,
10530 +                                 struct alginfo * const cdata,
10531 +                                 struct alginfo * const adata,
10532 +                                 const bool is_rfc3686, u32 *nonce, int era)
10533 +{
10534 +       u32 *key_jump_cmd;
10535 +       unsigned int enckeylen = cdata->keylen;
10536 +
10537 +       /* Note: Context registers are saved. */
10538 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10539 +
10540 +       /* Skip if already shared */
10541 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10542 +                                  JUMP_COND_SHRD);
10543 +
10544 +       /*
10545 +        * RFC3686 specific:
10546 +        *      | key = {AUTH_KEY, ENC_KEY, NONCE}
10547 +        *      | enckeylen = encryption key size + nonce size
10548 +        */
10549 +       if (is_rfc3686)
10550 +               enckeylen -= CTR_RFC3686_NONCE_SIZE;
10551 +
10552 +       if (era < 6) {
10553 +               if (adata->key_inline)
10554 +                       append_key_as_imm(desc, adata->key_virt,
10555 +                                         adata->keylen_pad, adata->keylen,
10556 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
10557 +                                         KEY_ENC);
10558 +               else
10559 +                       append_key(desc, adata->key_dma, adata->keylen,
10560 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10561 +       } else {
10562 +               append_proto_dkp(desc, adata);
10563 +       }
10564 +
10565 +       if (cdata->key_inline)
10566 +               append_key_as_imm(desc, cdata->key_virt, enckeylen,
10567 +                                 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10568 +       else
10569 +               append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10570 +                          KEY_DEST_CLASS_REG);
10571 +
10572 +       /* Load Counter into CONTEXT1 reg */
10573 +       if (is_rfc3686) {
10574 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10575 +                                  LDST_CLASS_IND_CCB |
10576 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10577 +               append_move(desc,
10578 +                           MOVE_SRC_OUTFIFO |
10579 +                           MOVE_DEST_CLASS1CTX |
10580 +                           (16 << MOVE_OFFSET_SHIFT) |
10581 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10582 +       }
10583 +
10584 +       set_jump_tgt_here(desc, key_jump_cmd);
10585 +}
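For rfc3686 transforms, the key blob handed to setkey() carries the 4-byte nonce at its tail, which is why init_sh_desc_key_aead() trims enckeylen before loading the class 1 key and moves the nonce into CONTEXT1 separately. A small standalone sketch of that layout, with example key sizes:

#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE 4

int main(void)
{
	/* hypothetical setkey() blob: {AUTH_KEY, ENC_KEY, NONCE} */
	unsigned char blob[20 + 16 + CTR_RFC3686_NONCE_SIZE];
	unsigned int authkeylen = 20;
	unsigned int enckeylen = sizeof(blob) - authkeylen;	/* incl. nonce */
	const unsigned char *nonce;

	nonce = blob + authkeylen + enckeylen - CTR_RFC3686_NONCE_SIZE;
	enckeylen -= CTR_RFC3686_NONCE_SIZE;	/* as done above */

	printf("cipher key: %u bytes, nonce at offset %td\n",
	       enckeylen, nonce - blob);
	return 0;
}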
10586 +
10587 +/**
10588 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10589 + *                          (non-protocol).
10590 + * @desc: pointer to buffer used for descriptor construction
10591 + * @cdata: pointer to block cipher transform definitions
10592 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10593 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10594 + * @adata: pointer to authentication transform definitions.
10595 + *         A split key is required for SEC Era < 6; the size of the split key
10596 + *         is specified in this case. Valid algorithm values - one of
10597 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
10598 + *         with OP_ALG_AAI_HMAC_PRECOMP.
10599 + * @ivsize: initialization vector size
10600 + * @icvsize: integrity check value (ICV) size (truncated or full)
10601 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10602 + * @nonce: pointer to rfc3686 nonce
10603 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10604 + * @is_qi: true when called from caam/qi
10605 + * @era: SEC Era
10606 + */
10607 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10608 +                           struct alginfo *adata, unsigned int ivsize,
10609 +                           unsigned int icvsize, const bool is_rfc3686,
10610 +                           u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
10611 +                           int era)
10612 +{
10613 +       /* Note: Context registers are saved. */
10614 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10615 +
10616 +       /* Class 2 operation */
10617 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10618 +                        OP_ALG_ENCRYPT);
10619 +
10620 +       if (is_qi) {
10621 +               u32 *wait_load_cmd;
10622 +
10623 +               /* REG3 = assoclen */
10624 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10625 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10626 +                               (4 << LDST_OFFSET_SHIFT));
10627 +
10628 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10629 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10630 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10631 +                                           JUMP_COND_NIFP);
10632 +               set_jump_tgt_here(desc, wait_load_cmd);
10633 +
10634 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10635 +                               LDST_SRCDST_BYTE_CONTEXT |
10636 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10637 +       }
10638 +
10639 +       /* Read and write assoclen bytes */
10640 +       if (is_qi || era < 3) {
10641 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10642 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10643 +       } else {
10644 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10645 +               append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10646 +       }
10647 +
10648 +       /* Skip assoc data */
10649 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10650 +
10651 +       /* read assoc before reading payload */
10652 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10653 +                                     FIFOLDST_VLF);
10654 +
10655 +       /* Load Counter into CONTEXT1 reg */
10656 +       if (is_rfc3686)
10657 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10658 +                                    LDST_SRCDST_BYTE_CONTEXT |
10659 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10660 +                                     LDST_OFFSET_SHIFT));
10661 +
10662 +       /* Class 1 operation */
10663 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10664 +                        OP_ALG_ENCRYPT);
10665 +
10666 +       /* Read and write cryptlen bytes */
10667 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10668 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10669 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10670 +
10671 +       /* Write ICV */
10672 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10673 +                        LDST_SRCDST_BYTE_CONTEXT);
10674 +
10675 +#ifdef DEBUG
10676 +       print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10677 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10678 +#endif
10679 +}
10680 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
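For reference, the job-ring driver's shared-descriptor setup ends up calling this constructor roughly as sketched below; the surrounding names (aead_set_sh_desc(), ctrlpriv->era, the dma_sync call) follow the conventions used elsewhere in this patch and are illustrative rather than an exact quote:

/* inside something like aead_set_sh_desc(), once cdata/adata are filled */
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
		       ivsize, ctx->authsize, is_rfc3686, nonce,
		       ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(ctx->jrdev, ctx->sh_desc_enc_dma,
			   desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);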
10681 +
10682 +/**
10683 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10684 + *                          (non-protocol).
10685 + * @desc: pointer to buffer used for descriptor construction
10686 + * @cdata: pointer to block cipher transform definitions
10687 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10688 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10689 + * @adata: pointer to authentication transform definitions.
10690 + *         A split key is required for SEC Era < 6; the size of the split key
10691 + *         is specified in this case. Valid algorithm values - one of
10692 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
10693 + *         with OP_ALG_AAI_HMAC_PRECOMP.
10694 + * @ivsize: initialization vector size
10695 + * @icvsize: integrity check value (ICV) size (truncated or full)
10696 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10697 + * @nonce: pointer to rfc3686 nonce
10698 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10699 + * @is_qi: true when called from caam/qi
10700 + * @era: SEC Era
10701 + */
10702 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10703 +                           struct alginfo *adata, unsigned int ivsize,
10704 +                           unsigned int icvsize, const bool geniv,
10705 +                           const bool is_rfc3686, u32 *nonce,
10706 +                           const u32 ctx1_iv_off, const bool is_qi, int era)
10707 +{
10708 +       /* Note: Context registers are saved. */
10709 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10710 +
10711 +       /* Class 2 operation */
10712 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10713 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10714 +
10715 +       if (is_qi) {
10716 +               u32 *wait_load_cmd;
10717 +
10718 +               /* REG3 = assoclen */
10719 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10720 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10721 +                               (4 << LDST_OFFSET_SHIFT));
10722 +
10723 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10724 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10725 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10726 +                                           JUMP_COND_NIFP);
10727 +               set_jump_tgt_here(desc, wait_load_cmd);
10728 +
10729 +               if (!geniv)
10730 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10731 +                                       LDST_SRCDST_BYTE_CONTEXT |
10732 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10733 +       }
10734 +
10735 +       /* Read and write assoclen bytes */
10736 +       if (is_qi || era < 3) {
10737 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10738 +               if (geniv)
10739 +                       append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
10740 +                                               ivsize);
10741 +               else
10742 +                       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
10743 +                                       CAAM_CMD_SZ);
10744 +       } else {
10745 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10746 +               if (geniv)
10747 +                       append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
10748 +                                               ivsize);
10749 +               else
10750 +                       append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
10751 +                                       CAAM_CMD_SZ);
10752 +       }
10753 +
10754 +       /* Skip assoc data */
10755 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10756 +
10757 +       /* read assoc before reading payload */
10758 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10759 +                            KEY_VLF);
10760 +
10761 +       if (geniv) {
10762 +               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10763 +                               LDST_SRCDST_BYTE_CONTEXT |
10764 +                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
10765 +               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10766 +                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10767 +       }
10768 +
10769 +       /* Load Counter into CONTEXT1 reg */
10770 +       if (is_rfc3686)
10771 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10772 +                                    LDST_SRCDST_BYTE_CONTEXT |
10773 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10774 +                                     LDST_OFFSET_SHIFT));
10775 +
10776 +       /* Choose operation */
10777 +       if (ctx1_iv_off)
10778 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10779 +                                OP_ALG_DECRYPT);
10780 +       else
10781 +               append_dec_op1(desc, cdata->algtype);
10782 +
10783 +       /* Read and write cryptlen bytes */
10784 +       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10785 +       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10786 +       aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10787 +
10788 +       /* Load ICV */
10789 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10790 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10791 +
10792 +#ifdef DEBUG
10793 +       print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10794 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10795 +#endif
10796 +}
10797 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10798 +
10799 +/**
10800 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10801 + *                             (non-protocol) with HW-generated initialization
10802 + *                             vector.
10803 + * @desc: pointer to buffer used for descriptor construction
10804 + * @cdata: pointer to block cipher transform definitions
10805 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
10806 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10807 + * @adata: pointer to authentication transform definitions.
10808 + *         A split key is required for SEC Era < 6; the size of the split key
10809 + *         is specified in this case. Valid algorithm values - one of
10810 + *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
10811 + *         with OP_ALG_AAI_HMAC_PRECOMP.
10812 + * @ivsize: initialization vector size
10813 + * @icvsize: integrity check value (ICV) size (truncated or full)
10814 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10815 + * @nonce: pointer to rfc3686 nonce
10816 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10817 + * @is_qi: true when called from caam/qi
10818 + * @era: SEC Era
10819 + */
10820 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10821 +                              struct alginfo *adata, unsigned int ivsize,
10822 +                              unsigned int icvsize, const bool is_rfc3686,
10823 +                              u32 *nonce, const u32 ctx1_iv_off,
10824 +                              const bool is_qi, int era)
10825 +{
10826 +       u32 geniv, moveiv;
10827 +
10828 +       /* Note: Context registers are saved. */
10829 +       init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10830 +
10831 +       if (is_qi) {
10832 +               u32 *wait_load_cmd;
10833 +
10834 +               /* REG3 = assoclen */
10835 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
10836 +                               LDST_SRCDST_WORD_DECO_MATH3 |
10837 +                               (4 << LDST_OFFSET_SHIFT));
10838 +
10839 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10840 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
10841 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
10842 +                                           JUMP_COND_NIFP);
10843 +               set_jump_tgt_here(desc, wait_load_cmd);
10844 +       }
10845 +
10846 +       if (is_rfc3686) {
10847 +               if (is_qi)
10848 +                       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10849 +                                       LDST_SRCDST_BYTE_CONTEXT |
10850 +                                       (ctx1_iv_off << LDST_OFFSET_SHIFT));
10851 +
10852 +               goto copy_iv;
10853 +       }
10854 +
10855 +       /* Generate IV */
10856 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10857 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10858 +               NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10859 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10860 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10861 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10862 +       append_move(desc, MOVE_WAITCOMP |
10863 +                   MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10864 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10865 +                   (ivsize << MOVE_LEN_SHIFT));
10866 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10867 +
10868 +copy_iv:
10869 +       /* Copy IV from class 1 context to the output FIFO */
10870 +       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10871 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10872 +                   (ivsize << MOVE_LEN_SHIFT));
10873 +
10874 +       /* Return to encryption */
10875 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10876 +                        OP_ALG_ENCRYPT);
10877 +
10878 +       /* Read and write assoclen bytes */
10879 +       if (is_qi || era < 3) {
10880 +               append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10881 +               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10882 +       } else {
10883 +               append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10884 +               append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10885 +       }
10886 +
10887 +       /* Skip assoc data */
10888 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10889 +
10890 +       /* read assoc before reading payload */
10891 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10892 +                            KEY_VLF);
10893 +
10894 +       /* Copy iv from outfifo to class 2 fifo */
10895 +       moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10896 +                NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10897 +       append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10898 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10899 +       append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10900 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10901 +
10902 +       /* Load Counter into CONTEXT1 reg */
10903 +       if (is_rfc3686)
10904 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10905 +                                    LDST_SRCDST_BYTE_CONTEXT |
10906 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10907 +                                     LDST_OFFSET_SHIFT));
10908 +
10909 +       /* Class 1 operation */
10910 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10911 +                        OP_ALG_ENCRYPT);
10912 +
10913 +       /* Will write ivsize + cryptlen */
10914 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10915 +
10916 +       /* No need to reload the IV */
10917 +       append_seq_fifo_load(desc, ivsize,
10918 +                            FIFOLD_CLASS_SKIP);
10919 +
10920 +       /* Will read cryptlen */
10921 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10922 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10923 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10924 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10925 +
10926 +       /* Write ICV */
10927 +       append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10928 +                        LDST_SRCDST_BYTE_CONTEXT);
10929 +
10930 +#ifdef DEBUG
10931 +       print_hex_dump(KERN_ERR,
10932 +                      "aead givenc shdesc@" __stringify(__LINE__)": ",
10933 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10934 +#endif
10935 +}
10936 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10937 +
10938 +/**
10939 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10940 + * @desc: pointer to buffer used for descriptor construction
10941 + * @cdata: pointer to block cipher transform definitions
10942 + *         Valid algorithm value - OP_ALG_ALGSEL_AES ORed
10943 + *         with OP_ALG_AAI_CBC
10944 + * @adata: pointer to authentication transform definitions.
10945 + *         A split key is required for SEC Era < 6; the size of the split key
10946 + *         is specified in this case. Valid algorithm value - OP_ALG_ALGSEL_SHA1
10947 + *         ORed with OP_ALG_AAI_HMAC_PRECOMP.
10948 + * @assoclen: associated data length
10949 + * @ivsize: initialization vector size
10950 + * @authsize: authentication data size
10951 + * @blocksize: block cipher size
10952 + * @era: SEC Era
10953 + */
10954 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10955 +                          struct alginfo *adata, unsigned int assoclen,
10956 +                          unsigned int ivsize, unsigned int authsize,
10957 +                          unsigned int blocksize, int era)
10958 +{
10959 +       u32 *key_jump_cmd, *zero_payload_jump_cmd;
10960 +       u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10961 +
10962 +       /*
10963 +        * Compute the index (in bytes) for the LOAD with destination of
10964 +        * Class 1 Data Size Register and for the LOAD that generates padding
10965 +        */
10966 +       if (adata->key_inline) {
10967 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10968 +                               cdata->keylen - 4 * CAAM_CMD_SZ;
10969 +               idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10970 +                            cdata->keylen - 2 * CAAM_CMD_SZ;
10971 +       } else {
10972 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10973 +                               4 * CAAM_CMD_SZ;
10974 +               idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10975 +                            2 * CAAM_CMD_SZ;
10976 +       }
10977 +
10978 +       stidx = 1 << HDR_START_IDX_SHIFT;
10979 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10980 +
10981 +       /* skip key loading if the keys are already loaded due to sharing */
10982 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10983 +                                  JUMP_COND_SHRD);
10984 +
10985 +       if (era < 6) {
10986 +               if (adata->key_inline)
10987 +                       append_key_as_imm(desc, adata->key_virt,
10988 +                                         adata->keylen_pad, adata->keylen,
10989 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
10990 +                                         KEY_ENC);
10991 +               else
10992 +                       append_key(desc, adata->key_dma, adata->keylen,
10993 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10994 +       } else {
10995 +               append_proto_dkp(desc, adata);
10996 +       }
10997 +
10998 +       if (cdata->key_inline)
10999 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11000 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11001 +       else
11002 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11003 +                          KEY_DEST_CLASS_REG);
11004 +
11005 +       set_jump_tgt_here(desc, key_jump_cmd);
11006 +
11007 +       /* class 2 operation */
11008 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11009 +                        OP_ALG_ENCRYPT);
11010 +       /* class 1 operation */
11011 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11012 +                        OP_ALG_ENCRYPT);
11013 +
11014 +       /* payloadlen = input data length - (assoclen + ivsize) */
11015 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
11016 +
11017 +       /* math1 = payloadlen + icvlen */
11018 +       append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
11019 +
11020 +       /* padlen = block_size - math1 % block_size */
11021 +       append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
11022 +       append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
11023 +
11024 +       /* cryptlen = payloadlen + icvlen + padlen */
11025 +       append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
11026 +
11027 +       /*
11028 +        * update immediate data with the padding length value
11029 +        * for the LOAD in the class 1 data size register.
11030 +        */
11031 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11032 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
11033 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11034 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
11035 +
11036 +       /* overwrite the PL field of the padding info FIFO entry */
11037 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11038 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
11039 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11040 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
11041 +
11042 +       /* store encrypted payload, icv and padding */
11043 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11044 +
11045 +       /* if payload length is zero, jump to zero-payload commands */
11046 +       append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
11047 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11048 +                                           JUMP_COND_MATH_Z);
11049 +
11050 +       /* load iv in context1 */
11051 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11052 +                  LDST_CLASS_1_CCB | ivsize);
11053 +
11054 +       /* read assoc for authentication */
11055 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11056 +                            FIFOLD_TYPE_MSG);
11057 +       /* insnoop payload */
11058 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
11059 +                            FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
11060 +
11061 +       /* jump over the zero-payload commands */
11062 +       append_jump(desc, JUMP_TEST_ALL | 3);
11063 +
11064 +       /* zero-payload commands */
11065 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11066 +
11067 +       /* load iv in context1 */
11068 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11069 +                  LDST_CLASS_1_CCB | ivsize);
11070 +
11071 +       /* assoc data is the only data for authentication */
11072 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11073 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
11074 +
11075 +       /* send icv to encryption */
11076 +       append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
11077 +                   authsize);
11078 +
11079 +       /* update class 1 data size register with padding length */
11080 +       append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
11081 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
11082 +
11083 +       /* generate padding and send it to encryption */
11084 +       genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
11085 +             NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
11086 +       append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
11087 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11088 +
11089 +#ifdef DEBUG
11090 +       print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
11091 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
11092 +                      desc_bytes(desc), 1);
11093 +#endif
11094 +}
11095 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
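The padding math above deserves a spelled-out check: math1 = payloadlen + icvlen, math3 = math1 mod blocksize (the AND with blocksize - 1 works because blocksize is a power of two), and math2 = blocksize - math3, so padlen is always 1..blocksize and never 0 — exactly what TLS 1.0 CBC padding requires, since at least one pad byte must be present. A minimal standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int blocksize = 16, authsize = 20; /* AES-CBC + HMAC-SHA1 */
	unsigned int payloadlen;

	for (payloadlen = 10; payloadlen <= 12; payloadlen++) {
		unsigned int math1 = payloadlen + authsize;
		unsigned int math3 = math1 & (blocksize - 1);
		unsigned int padlen = blocksize - math3;

		printf("payload %2u -> cryptlen %2u, padlen %2u\n",
		       payloadlen, math1 + padlen, padlen);
	}
	return 0;
}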
11096 +
11097 +/**
11098 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
11099 + * @desc: pointer to buffer used for descriptor construction
11100 + * @cdata: pointer to block cipher transform definitions
11101 + *         Valid algorithm value - OP_ALG_ALGSEL_AES ORed
11102 + *         with OP_ALG_AAI_CBC
11103 + * @adata: pointer to authentication transform definitions.
11104 + *         A split key is required for SEC Era < 6; the size of the split key
11105 + *         is specified in this case. Valid algorithm value - OP_ALG_ALGSEL_SHA1
11106 + *         ORed with OP_ALG_AAI_HMAC_PRECOMP.
11107 + * @assoclen: associated data length
11108 + * @ivsize: initialization vector size
11109 + * @authsize: authentication data size
11110 + * @blocksize: block cipher size
11111 + * @era: SEC Era
11112 + */
11113 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11114 +                          struct alginfo *adata, unsigned int assoclen,
11115 +                          unsigned int ivsize, unsigned int authsize,
11116 +                          unsigned int blocksize, int era)
11117 +{
11118 +       u32 stidx, jumpback;
11119 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11120 +       /*
11121 +        * Pointer Size bool determines the size of address pointers.
11122 +        * false - Pointers fit in one 32-bit word.
11123 +        * true - Pointers fit in two 32-bit words.
11124 +        */
11125 +       static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11126 +
11127 +       stidx = 1 << HDR_START_IDX_SHIFT;
11128 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11129 +
11130 +       /* skip key loading if the keys are already loaded due to sharing */
11131 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11132 +                                  JUMP_COND_SHRD);
11133 +
11134 +       if (era < 6)
11135 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11136 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
11137 +       else
11138 +               append_proto_dkp(desc, adata);
11139 +
11140 +       append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11141 +                  KEY_DEST_CLASS_REG);
11142 +
11143 +       set_jump_tgt_here(desc, key_jump_cmd);
11144 +
11145 +       /* class 2 operation */
11146 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11147 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11148 +       /* class 1 operation */
11149 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11150 +                        OP_ALG_DECRYPT);
11151 +
11152 +       /* VSIL = input data length - 2 * block_size */
11153 +       append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11154 +                               blocksize);
11155 +
11156 +       /*
11157 +        * payloadlen + icvlen + padlen = input data length - (assoclen +
11158 +        * ivsize)
11159 +        */
11160 +       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11161 +
11162 +       /* skip data to the last but one cipher block */
11163 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11164 +
11165 +       /* load iv for the last cipher block */
11166 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11167 +                  LDST_CLASS_1_CCB | ivsize);
11168 +
11169 +       /* read last cipher block */
11170 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11171 +                            FIFOLD_TYPE_LAST1 | blocksize);
11172 +
11173 +       /* move decrypted block into math0 and math1 */
11174 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11175 +                   blocksize);
11176 +
11177 +       /* reset AES CHA */
11178 +       append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11179 +                           LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11180 +
11181 +       /* rewind input sequence */
11182 +       append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11183 +
11184 +       /* key1 is in decryption form */
11185 +       append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11186 +                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11187 +
11188 +       /* load iv in context1 */
11189 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11190 +                  LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11191 +
11192 +       /* read sequence number */
11193 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11194 +       /* load Type, Version and Len fields into math0 */
11195 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11196 +                  LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11197 +
11198 +       /* compute (padlen - 1) */
11199 +       append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11200 +
11201 +       /* math2 = icvlen + (padlen - 1) + 1 */
11202 +       append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11203 +
11204 +       append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11205 +
11206 +       /* VSOL = payloadlen + icvlen + padlen */
11207 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11208 +
11209 +#ifdef __LITTLE_ENDIAN
11210 +       append_moveb(desc, MOVE_WAITCOMP |
11211 +                    MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11212 +#endif
11213 +       /* update Len field */
11214 +       append_math_sub(desc, REG0, REG0, REG2, 8);
11215 +
11216 +       /* store decrypted payload, icv and padding */
11217 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11218 +
11219 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11220 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11221 +
11222 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11223 +                                           JUMP_COND_MATH_Z);
11224 +
11225 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
11226 +       append_move(desc, MOVE_WAITCOMP |
11227 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11228 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11229 +
11230 +       /* out-snoop the payload */
11231 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11232 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11233 +                            FIFOLDST_VLF);
11234 +       skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11235 +
11236 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11237 +       /* send Type, Version and Len (pre-ICV) fields to authentication */
11238 +       append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11239 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11240 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
11241 +
11242 +       set_jump_tgt_here(desc, skip_zero_jump_cmd);
11243 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11244 +
11245 +       /* load icvlen and padlen */
11246 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11247 +                            FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11248 +
11249 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11250 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11251 +
11252 +       /*
11253 +        * Start a new input sequence using the SEQ OUT PTR command options,
11254 +        * pointer and length used when the current output sequence was defined.
11255 +        */
11256 +       if (ps) {
11257 +               /*
11258 +                * Move the lower 32 bits of Shared Descriptor address, the
11259 +                * SEQ OUT PTR command, Output Pointer (2 words) and
11260 +                * Output Length into math registers.
11261 +                */
11262 +#ifdef __LITTLE_ENDIAN
11263 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11264 +                           MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11265 +                           20);
11266 +#else
11267 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11268 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11269 +                           20);
11270 +#endif
11271 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
11272 +               append_math_and_imm_u32(desc, REG0, REG0, IMM,
11273 +                                       ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11274 +               /* Append a JUMP command after the copied fields */
11275 +               jumpback = CMD_JUMP | (char)-9;
11276 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11277 +                                   LDST_SRCDST_WORD_DECO_MATH2 |
11278 +                                   (4 << LDST_OFFSET_SHIFT));
11279 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11280 +               /* Move the updated fields back to the Job Descriptor */
11281 +#ifdef __LITTLE_ENDIAN
11282 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11283 +                           MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11284 +                           24);
11285 +#else
11286 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11287 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11288 +                           24);
11289 +#endif
11290 +               /*
11291 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11292 +                * and then jump back to the next command from the
11293 +                * Shared Descriptor.
11294 +                */
11295 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11296 +       } else {
11297 +               /*
11298 +                * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11299 +                * Output Length into math registers.
11300 +                */
11301 +#ifdef __LITTLE_ENDIAN
11302 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11303 +                           MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11304 +                           12);
11305 +#else
11306 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11307 +                           MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11308 +                           12);
11309 +#endif
11310 +               /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
11311 +               append_math_and_imm_u64(desc, REG0, REG0, IMM,
11312 +                                       ~(((u64)(CMD_SEQ_IN_PTR ^
11313 +                                                CMD_SEQ_OUT_PTR)) << 32));
11314 +               /* Append a JUMP command after the copied fields */
11315 +               jumpback = CMD_JUMP | (char)-7;
11316 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11317 +                                   LDST_SRCDST_WORD_DECO_MATH1 |
11318 +                                   (4 << LDST_OFFSET_SHIFT));
11319 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11320 +               /* Move the updated fields back to the Job Descriptor */
11321 +#ifdef __LITTLE_ENDIAN
11322 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11323 +                           MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11324 +                           16);
11325 +#else
11326 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11327 +                           MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11328 +                           16);
11329 +#endif
11330 +               /*
11331 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
11332 +                * and then jump back to the next command from the
11333 +                * Shared Descriptor.
11334 +                */
11335 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11336 +       }
11337 +
11338 +       /* skip payload */
11339 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11340 +       /* check icv */
11341 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11342 +                            FIFOLD_TYPE_LAST2 | authsize);
11343 +
11344 +#ifdef DEBUG
11345 +       print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11346 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
11347 +                      desc_bytes(desc), 1);
11348 +#endif
11349 +}
11350 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
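
/*
 * Editor's illustrative sketch, not part of the patch: a minimal caller
 * building the TLS 1.0 decap shared descriptor for cbc(aes) + hmac(sha1),
 * assuming the desc.h/caamalg_desc.h context of this file. The 13-byte
 * assoclen matches the TLS MAC pseudo-header (8-byte sequence number plus
 * type, version and length) that the descriptor above feeds to the class 2
 * engine; the key DMA handles and the era value are placeholders.
 */
static void example_tls10_dec_shdesc(u32 *desc, dma_addr_t enc_key_dma,
				     unsigned int enc_keylen,
				     dma_addr_t auth_key_dma,
				     unsigned int auth_keylen, int era)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.key_dma = enc_key_dma,
		.keylen = enc_keylen,
	};
	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.key_dma = auth_key_dma,
		.keylen = auth_keylen,
	};

	cnstr_shdsc_tls_decap(desc, &cdata, &adata, 13, AES_BLOCK_SIZE,
			      SHA1_DIGEST_SIZE, AES_BLOCK_SIZE, era);
}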
11351 +
11352 +/**
11353 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11354 + * @desc: pointer to buffer used for descriptor construction
11355 + * @cdata: pointer to block cipher transform definitions
11356 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11357 + * @ivsize: initialization vector size
11358 + * @icvsize: integrity check value (ICV) size (truncated or full)
11359 + * @is_qi: true when called from caam/qi
11360 + */
11361 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11362 +                          unsigned int ivsize, unsigned int icvsize,
11363 +                          const bool is_qi)
11364 +{
11365 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11366 +           *zero_assoc_jump_cmd2;
11367 +
11368 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11369 +
11370 +       /* skip key loading if keys are already loaded due to sharing */
11371 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11372 +                                  JUMP_COND_SHRD);
11373 +       if (cdata->key_inline)
11374 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11375 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11376 +       else
11377 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11378 +                          KEY_DEST_CLASS_REG);
11379 +       set_jump_tgt_here(desc, key_jump_cmd);
11380 +
11381 +       /* class 1 operation */
11382 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11383 +                        OP_ALG_ENCRYPT);
11384 +
11385 +       if (is_qi) {
11386 +               u32 *wait_load_cmd;
11387 +
11388 +               /* REG3 = assoclen */
11389 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11390 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11391 +                               (4 << LDST_OFFSET_SHIFT));
11392 +
11393 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11394 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11395 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11396 +                                           JUMP_COND_NIFP);
11397 +               set_jump_tgt_here(desc, wait_load_cmd);
11398 +
11399 +               append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11400 +                                       ivsize);
11401 +       } else {
11402 +               append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11403 +                               CAAM_CMD_SZ);
11404 +       }
11405 +
11406 +       /* if assoclen + cryptlen is ZERO, skip to ICV write */
11407 +       zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11408 +                                                JUMP_COND_MATH_Z);
11409 +
11410 +       if (is_qi)
11411 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11412 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11413 +
11414 +       /* if assoclen is ZERO, skip reading the assoc data */
11415 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11416 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11417 +                                          JUMP_COND_MATH_Z);
11418 +
11419 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11420 +
11421 +       /* skip assoc data */
11422 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11423 +
11424 +       /* cryptlen = seqinlen - assoclen */
11425 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11426 +
11427 +       /* if cryptlen is ZERO jump to zero-payload commands */
11428 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11429 +                                           JUMP_COND_MATH_Z);
11430 +
11431 +       /* read assoc data */
11432 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11433 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11434 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11435 +
11436 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11437 +
11438 +       /* write encrypted data */
11439 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11440 +
11441 +       /* read payload data */
11442 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11443 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11444 +
11445 +       /* jump to ICV writing */
11446 +       if (is_qi)
11447 +               append_jump(desc, JUMP_TEST_ALL | 4);
11448 +       else
11449 +               append_jump(desc, JUMP_TEST_ALL | 2);
11450 +
11451 +       /* zero-payload commands */
11452 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11453 +
11454 +       /* read assoc data */
11455 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11456 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11457 +       if (is_qi)
11458 +               /* jump to ICV writing */
11459 +               append_jump(desc, JUMP_TEST_ALL | 2);
11460 +
11461 +       /* There is no input data */
11462 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11463 +
11464 +       if (is_qi)
11465 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11466 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11467 +                                    FIFOLD_TYPE_LAST1);
11468 +
11469 +       /* write ICV */
11470 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11471 +                        LDST_SRCDST_BYTE_CONTEXT);
11472 +
11473 +#ifdef DEBUG
11474 +       print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11475 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11476 +#endif
11477 +}
11478 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
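
/*
 * Editor's illustrative sketch, not part of the patch: the encap descriptor
 * above takes the key either inline (key_virt) or by reference (key_dma),
 * so a caller typically checks the descriptor-text bound from
 * caamalg_desc.h against the shared-descriptor size limit first.
 * CAAM_DESC_BYTES_MAX is assumed to be that limit from desc_constr.h.
 */
static void example_gcm_enc_shdesc(u32 *desc, void *key, dma_addr_t key_dma,
				   unsigned int keylen, unsigned int ivsize,
				   unsigned int icvsize)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		.keylen = keylen,
	};

	/* inline the key only when descriptor text plus key fits */
	cdata.key_inline = DESC_GCM_ENC_LEN + keylen <= CAAM_DESC_BYTES_MAX;
	if (cdata.key_inline)
		cdata.key_virt = key;
	else
		cdata.key_dma = key_dma;

	cnstr_shdsc_gcm_encap(desc, &cdata, ivsize, icvsize, false);
}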
11479 +
11480 +/**
11481 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11482 + * @desc: pointer to buffer used for descriptor construction
11483 + * @cdata: pointer to block cipher transform definitions
11484 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11485 + * @ivsize: initialization vector size
11486 + * @icvsize: integrity check value (ICV) size (truncated or full)
11487 + * @is_qi: true when called from caam/qi
11488 + */
11489 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11490 +                          unsigned int ivsize, unsigned int icvsize,
11491 +                          const bool is_qi)
11492 +{
11493 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11494 +
11495 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11496 +
11497 +       /* skip key loading if keys are already loaded due to sharing */
11498 +       key_jump_cmd = append_jump(desc, JUMP_JSL |
11499 +                                  JUMP_TEST_ALL | JUMP_COND_SHRD);
11500 +       if (cdata->key_inline)
11501 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11502 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11503 +       else
11504 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11505 +                          KEY_DEST_CLASS_REG);
11506 +       set_jump_tgt_here(desc, key_jump_cmd);
11507 +
11508 +       /* class 1 operation */
11509 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11510 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11511 +
11512 +       if (is_qi) {
11513 +               u32 *wait_load_cmd;
11514 +
11515 +               /* REG3 = assoclen */
11516 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11517 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11518 +                               (4 << LDST_OFFSET_SHIFT));
11519 +
11520 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11521 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11522 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11523 +                                           JUMP_COND_NIFP);
11524 +               set_jump_tgt_here(desc, wait_load_cmd);
11525 +
11526 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11527 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11528 +       }
11529 +
11530 +       /* if assoclen is ZERO, skip reading the assoc data */
11531 +       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11532 +       zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11533 +                                                JUMP_COND_MATH_Z);
11534 +
11535 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11536 +
11537 +       /* skip assoc data */
11538 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11539 +
11540 +       /* read assoc data */
11541 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11542 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11543 +
11544 +       set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11545 +
11546 +       /* cryptlen = seqoutlen - assoclen */
11547 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11548 +
11549 +       /* jump to zero-payload command if cryptlen is zero */
11550 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11551 +                                           JUMP_COND_MATH_Z);
11552 +
11553 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11554 +
11555 +       /* store decrypted data */
11556 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11557 +
11558 +       /* read payload data */
11559 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11560 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11561 +
11562 +       /* zero-payload command */
11563 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
11564 +
11565 +       /* read ICV */
11566 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11567 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11568 +
11569 +#ifdef DEBUG
11570 +       print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11571 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11572 +#endif
11573 +}
11574 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
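
/*
 * Editor's illustrative sketch, not part of the patch: with is_qi set, the
 * job's input sequence must start with the 4-byte assoclen word followed by
 * the IV, matching the seq-load/seq-fifo-load pair in the descriptor above.
 * The length bound switches to the DESC_QI_* variant; CAAM_DESC_BYTES_MAX
 * as assumed in the encap example.
 */
static void example_gcm_dec_shdesc_qi(u32 *desc, void *key, dma_addr_t key_dma,
				      unsigned int keylen, unsigned int ivsize,
				      unsigned int icvsize)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		.keylen = keylen,
	};

	cdata.key_inline = DESC_QI_GCM_DEC_LEN + keylen <= CAAM_DESC_BYTES_MAX;
	if (cdata.key_inline)
		cdata.key_virt = key;
	else
		cdata.key_dma = key_dma;

	cnstr_shdsc_gcm_decap(desc, &cdata, ivsize, icvsize, true);
}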
11575 +
11576 +/**
11577 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11578 + *                             (non-protocol).
11579 + * @desc: pointer to buffer used for descriptor construction
11580 + * @cdata: pointer to block cipher transform definitions
11581 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11582 + * @ivsize: initialization vector size
11583 + * @icvsize: integrity check value (ICV) size (truncated or full)
11584 + * @is_qi: true when called from caam/qi
11585 + */
11586 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11587 +                              unsigned int ivsize, unsigned int icvsize,
11588 +                              const bool is_qi)
11589 +{
11590 +       u32 *key_jump_cmd;
11591 +
11592 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11593 +
11594 +       /* Skip key loading if it is loaded due to sharing */
11595 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11596 +                                  JUMP_COND_SHRD);
11597 +       if (cdata->key_inline)
11598 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11599 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11600 +       else
11601 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11602 +                          KEY_DEST_CLASS_REG);
11603 +       set_jump_tgt_here(desc, key_jump_cmd);
11604 +
11605 +       /* Class 1 operation */
11606 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11607 +                        OP_ALG_ENCRYPT);
11608 +
11609 +       if (is_qi) {
11610 +               u32 *wait_load_cmd;
11611 +
11612 +               /* REG3 = assoclen */
11613 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11614 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11615 +                               (4 << LDST_OFFSET_SHIFT));
11616 +
11617 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11618 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11619 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11620 +                                           JUMP_COND_NIFP);
11621 +               set_jump_tgt_here(desc, wait_load_cmd);
11622 +
11623 +               /* Read salt and IV */
11624 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11625 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11626 +                                       FIFOLD_TYPE_IV);
11627 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11628 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11629 +       }
11630 +
11631 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11632 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11633 +
11634 +       /* Read assoc data */
11635 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11636 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11637 +
11638 +       /* Skip IV */
11639 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11640 +
11641 +       /* Will read cryptlen bytes */
11642 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11643 +
11644 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11645 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11646 +
11647 +       /* Skip assoc data */
11648 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11649 +
11650 +       /* cryptlen = seqoutlen - assoclen */
11651 +       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11652 +
11653 +       /* Write encrypted data */
11654 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11655 +
11656 +       /* Read payload data */
11657 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11658 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11659 +
11660 +       /* Write ICV */
11661 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11662 +                        LDST_SRCDST_BYTE_CONTEXT);
11663 +
11664 +#ifdef DEBUG
11665 +       print_hex_dump(KERN_ERR,
11666 +                      "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11667 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11668 +#endif
11669 +}
11670 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
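
/*
 * Editor's illustrative sketch, not part of the patch: the QI path above
 * reads the 4-byte salt from cdata->key_virt + cdata->keylen, i.e. the
 * caller keeps key and RFC4106 salt contiguous in one buffer and reports
 * only the key part in keylen. A minimal setkey helper under those
 * assumptions:
 */
static int example_rfc4106_setkey(u8 *key_buf, const u8 *key_and_salt,
				  unsigned int keylen)
{
	if (keylen < 4)
		return -EINVAL;

	/* the last 4 input bytes are the salt, not key material */
	memcpy(key_buf, key_and_salt, keylen);

	return keylen - 4;	/* value to report as cdata.keylen */
}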
11671 +
11672 +/**
11673 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11674 + *                             (non-protocol).
11675 + * @desc: pointer to buffer used for descriptor construction
11676 + * @cdata: pointer to block cipher transform definitions
11677 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11678 + * @ivsize: initialization vector size
11679 + * @icvsize: integrity check value (ICV) size (truncated or full)
11680 + * @is_qi: true when called from caam/qi
11681 + */
11682 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11683 +                              unsigned int ivsize, unsigned int icvsize,
11684 +                              const bool is_qi)
11685 +{
11686 +       u32 *key_jump_cmd;
11687 +
11688 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11689 +
11690 +       /* Skip key loading if it is loaded due to sharing */
11691 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11692 +                                  JUMP_COND_SHRD);
11693 +       if (cdata->key_inline)
11694 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11695 +                                 cdata->keylen, CLASS_1 |
11696 +                                 KEY_DEST_CLASS_REG);
11697 +       else
11698 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11699 +                          KEY_DEST_CLASS_REG);
11700 +       set_jump_tgt_here(desc, key_jump_cmd);
11701 +
11702 +       /* Class 1 operation */
11703 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11704 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11705 +
11706 +       if (is_qi) {
11707 +               u32 *wait_load_cmd;
11708 +
11709 +               /* REG3 = assoclen */
11710 +               append_seq_load(desc, 4, LDST_CLASS_DECO |
11711 +                               LDST_SRCDST_WORD_DECO_MATH3 |
11712 +                               (4 << LDST_OFFSET_SHIFT));
11713 +
11714 +               wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11715 +                                           JUMP_COND_CALM | JUMP_COND_NCP |
11716 +                                           JUMP_COND_NOP | JUMP_COND_NIP |
11717 +                                           JUMP_COND_NIFP);
11718 +               set_jump_tgt_here(desc, wait_load_cmd);
11719 +
11720 +               /* Read salt and IV */
11721 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11722 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11723 +                                       FIFOLD_TYPE_IV);
11724 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11725 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11726 +       }
11727 +
11728 +       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11729 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11730 +
11731 +       /* Read assoc data */
11732 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11733 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11734 +
11735 +       /* Skip IV */
11736 +       append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11737 +
11738 +       /* Will read cryptlen bytes */
11739 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11740 +
11741 +       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11742 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11743 +
11744 +       /* Skip assoc data */
11745 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11746 +
11747 +       /* Will write cryptlen bytes */
11748 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11749 +
11750 +       /* Store payload data */
11751 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11752 +
11753 +       /* Read encrypted data */
11754 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11755 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11756 +
11757 +       /* Read ICV */
11758 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11759 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11760 +
11761 +#ifdef DEBUG
11762 +       print_hex_dump(KERN_ERR,
11763 +                      "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11764 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11765 +#endif
11766 +}
11767 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11768 +
11769 +/**
11770 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11771 + *                             (non-protocol).
11772 + * @desc: pointer to buffer used for descriptor construction
11773 + * @cdata: pointer to block cipher transform definitions
11774 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11775 + * @ivsize: initialization vector size
11776 + * @icvsize: integrity check value (ICV) size (truncated or full)
11777 + * @is_qi: true when called from caam/qi
11778 + */
11779 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11780 +                              unsigned int ivsize, unsigned int icvsize,
11781 +                              const bool is_qi)
11782 +{
11783 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11784 +
11785 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11786 +
11787 +       /* Skip key loading if it is loaded due to sharing */
11788 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11789 +                                  JUMP_COND_SHRD);
11790 +       if (cdata->key_inline)
11791 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11792 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11793 +       else
11794 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11795 +                          KEY_DEST_CLASS_REG);
11796 +       set_jump_tgt_here(desc, key_jump_cmd);
11797 +
11798 +       /* Class 1 operation */
11799 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11800 +                        OP_ALG_ENCRYPT);
11801 +
11802 +       if (is_qi) {
11803 +               /* assoclen is not needed, skip it */
11804 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11805 +
11806 +               /* Read salt and IV */
11807 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11808 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11809 +                                       FIFOLD_TYPE_IV);
11810 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11811 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11812 +       }
11813 +
11814 +       /* assoclen + cryptlen = seqinlen */
11815 +       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11816 +
11817 +       /*
11818 +        * The MOVE_LEN opcode is not available in all SEC HW revisions, so
11819 +        * the descriptor buffer is self-patched instead: the two MOVE
11820 +        * commands below are fixed up via set_move_tgt_here().
11821 +        */
11822 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11823 +                                   (0x6 << MOVE_LEN_SHIFT));
11824 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11825 +                                    (0x8 << MOVE_LEN_SHIFT));
11826 +
11827 +       /* Will read assoclen + cryptlen bytes */
11828 +       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11829 +
11830 +       /* Will write assoclen + cryptlen bytes */
11831 +       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11832 +
11833 +       /* Read and write assoclen + cryptlen bytes */
11834 +       aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11835 +
11836 +       set_move_tgt_here(desc, read_move_cmd);
11837 +       set_move_tgt_here(desc, write_move_cmd);
11838 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11839 +       /* Move payload data to OFIFO */
11840 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11841 +
11842 +       /* Write ICV */
11843 +       append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11844 +                        LDST_SRCDST_BYTE_CONTEXT);
11845 +
11846 +#ifdef DEBUG
11847 +       print_hex_dump(KERN_ERR,
11848 +                      "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11849 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11850 +#endif
11851 +}
11852 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11853 +
11854 +/**
11855 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11856 + *                             (non-protocol).
11857 + * @desc: pointer to buffer used for descriptor construction
11858 + * @cdata: pointer to block cipher transform definitions
11859 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11860 + * @ivsize: initialization vector size
11861 + * @icvsize: integrity check value (ICV) size (truncated or full)
11862 + * @is_qi: true when called from caam/qi
11863 + */
11864 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11865 +                              unsigned int ivsize, unsigned int icvsize,
11866 +                              const bool is_qi)
11867 +{
11868 +       u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11869 +
11870 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
11871 +
11872 +       /* Skip key loading if it is loaded due to sharing */
11873 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11874 +                                  JUMP_COND_SHRD);
11875 +       if (cdata->key_inline)
11876 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11877 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11878 +       else
11879 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11880 +                          KEY_DEST_CLASS_REG);
11881 +       set_jump_tgt_here(desc, key_jump_cmd);
11882 +
11883 +       /* Class 1 operation */
11884 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11885 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11886 +
11887 +       if (is_qi) {
11888 +               /* assoclen is not needed, skip it */
11889 +               append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11890 +
11891 +               /* Read salt and IV */
11892 +               append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11893 +                                       cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11894 +                                       FIFOLD_TYPE_IV);
11895 +               append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11896 +                                    FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11897 +       }
11898 +
11899 +       /* assoclen + cryptlen = seqoutlen */
11900 +       append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11901 +
11902 +       /*
11903 +        * The MOVE_LEN opcode is not available in all SEC HW revisions, so
11904 +        * the descriptor buffer is self-patched instead: the two MOVE
11905 +        * commands below are fixed up via set_move_tgt_here().
11906 +        */
11907 +       read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11908 +                                   (0x6 << MOVE_LEN_SHIFT));
11909 +       write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11910 +                                    (0x8 << MOVE_LEN_SHIFT));
11911 +
11912 +       /* Will read assoclen + cryptlen bytes */
11913 +       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11914 +
11915 +       /* Will write assoclen + cryptlen bytes */
11916 +       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11917 +
11918 +       /* Store payload data */
11919 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11920 +
11921 +       /* In-snoop assoclen + cryptlen data */
11922 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11923 +                            FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11924 +
11925 +       set_move_tgt_here(desc, read_move_cmd);
11926 +       set_move_tgt_here(desc, write_move_cmd);
11927 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11928 +       /* Move payload data to OFIFO */
11929 +       append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11930 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11931 +
11932 +       /* Read ICV */
11933 +       append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11934 +                            FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11935 +
11936 +#ifdef DEBUG
11937 +       print_hex_dump(KERN_ERR,
11938 +                      "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11939 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11940 +#endif
11941 +}
11942 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
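
/*
 * Editor's illustrative sketch, not part of the patch: rfc4543 (GMAC) runs
 * the whole assoclen + cryptlen stream through the authentication engine as
 * AAD while the payload is copied to the output via the OFIFO moves above,
 * so encap output is the input plus icvsize bytes of ICV. Key handling
 * follows the same inline-or-DMA pattern as the GCM examples.
 */
static void example_rfc4543_shdescs(u32 *enc_desc, u32 *dec_desc, void *key,
				    dma_addr_t key_dma, unsigned int keylen,
				    unsigned int ivsize, unsigned int icvsize)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		.keylen = keylen,
	};

	cdata.key_inline = DESC_RFC4543_ENC_LEN + keylen <=
			   CAAM_DESC_BYTES_MAX;
	if (cdata.key_inline)
		cdata.key_virt = key;
	else
		cdata.key_dma = key_dma;
	cnstr_shdsc_rfc4543_encap(enc_desc, &cdata, ivsize, icvsize, false);

	cdata.key_inline = DESC_RFC4543_DEC_LEN + keylen <=
			   CAAM_DESC_BYTES_MAX;
	if (cdata.key_inline)
		cdata.key_virt = key;
	else
		cdata.key_dma = key_dma;
	cnstr_shdsc_rfc4543_decap(dec_desc, &cdata, ivsize, icvsize, false);
}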
11943 +
11944 +/*
11945 + * For ablkcipher encrypt and decrypt, read from req->src and
11946 + * write to req->dst
11947 + */
11948 +static inline void ablkcipher_append_src_dst(u32 *desc)
11949 +{
11950 +       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11951 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11952 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11953 +                            KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11954 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11955 +}
11956 +
11957 +/**
11958 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11959 + * @desc: pointer to buffer used for descriptor construction
11960 + * @cdata: pointer to block cipher transform definitions
11961 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
11962 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11963 + * @ivsize: initialization vector size
11964 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11965 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11966 + */
11967 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11968 +                                 unsigned int ivsize, const bool is_rfc3686,
11969 +                                 const u32 ctx1_iv_off)
11970 +{
11971 +       u32 *key_jump_cmd;
11972 +
11973 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11974 +       /* Skip if already shared */
11975 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11976 +                                  JUMP_COND_SHRD);
11977 +
11978 +       /* Load class1 key only */
11979 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11980 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11981 +
11982 +       /* Load nonce into CONTEXT1 reg */
11983 +       if (is_rfc3686) {
11984 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
11985 +
11986 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11987 +                                  LDST_CLASS_IND_CCB |
11988 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11989 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11990 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11991 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11992 +       }
11993 +
11994 +       set_jump_tgt_here(desc, key_jump_cmd);
11995 +
11996 +       /* Load iv */
11997 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11998 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11999 +
12000 +       /* Load counter into CONTEXT1 reg */
12001 +       if (is_rfc3686)
12002 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12003 +                                    LDST_SRCDST_BYTE_CONTEXT |
12004 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12005 +                                     LDST_OFFSET_SHIFT));
12006 +
12007 +       /* Load operation */
12008 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12009 +                        OP_ALG_ENCRYPT);
12010 +
12011 +       /* Perform operation */
12012 +       ablkcipher_append_src_dst(desc);
12013 +
12014 +#ifdef DEBUG
12015 +       print_hex_dump(KERN_ERR,
12016 +                      "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
12017 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12018 +#endif
12019 +}
12020 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
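
/*
 * Editor's illustrative sketch, not part of the patch: plain cbc(aes) needs
 * no RFC3686 nonce and keeps its IV at offset 0 of CONTEXT1, so is_rfc3686
 * is false and ctx1_iv_off is 0; the ctr/rfc3686 wrappers pass a non-zero
 * offset instead so the counter half of the register stays free. Note that
 * the descriptor above always inlines the class 1 key.
 */
static void example_cbc_aes_enc_shdesc(u32 *desc, void *key,
				       unsigned int keylen)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.key_virt = key,
		.keylen = keylen,
	};

	cnstr_shdsc_ablkcipher_encap(desc, &cdata, AES_BLOCK_SIZE, false, 0);
}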
12021 +
12022 +/**
12023 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
12024 + * @desc: pointer to buffer used for descriptor construction
12025 + * @cdata: pointer to block cipher transform definitions
12026 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
12027 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
12028 + * @ivsize: initialization vector size
12029 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12030 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12031 + */
12032 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12033 +                                 unsigned int ivsize, const bool is_rfc3686,
12034 +                                 const u32 ctx1_iv_off)
12035 +{
12036 +       u32 *key_jump_cmd;
12037 +
12038 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12039 +       /* Skip if already shared */
12040 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12041 +                                  JUMP_COND_SHRD);
12042 +
12043 +       /* Load class1 key only */
12044 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12045 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12046 +
12047 +       /* Load nonce into CONTEXT1 reg */
12048 +       if (is_rfc3686) {
12049 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
12050 +
12051 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12052 +                                  LDST_CLASS_IND_CCB |
12053 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12054 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12055 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12056 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12057 +       }
12058 +
12059 +       set_jump_tgt_here(desc, key_jump_cmd);
12060 +
12061 +       /* load IV */
12062 +       append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12063 +                       LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12064 +
12065 +       /* Load counter into CONTEXT1 reg */
12066 +       if (is_rfc3686)
12067 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12068 +                                    LDST_SRCDST_BYTE_CONTEXT |
12069 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12070 +                                     LDST_OFFSET_SHIFT));
12071 +
12072 +       /* Choose operation */
12073 +       if (ctx1_iv_off)
12074 +               append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12075 +                                OP_ALG_DECRYPT);
12076 +       else
12077 +               append_dec_op1(desc, cdata->algtype);
12078 +
12079 +       /* Perform operation */
12080 +       ablkcipher_append_src_dst(desc);
12081 +
12082 +#ifdef DEBUG
12083 +       print_hex_dump(KERN_ERR,
12084 +                      "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
12085 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12086 +#endif
12087 +}
12088 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
12089 +
12090 +/**
12091 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
12092 + *                                   with HW-generated initialization vector.
12093 + * @desc: pointer to buffer used for descriptor construction
12094 + * @cdata: pointer to block cipher transform definitions
12095 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
12096 + *         with OP_ALG_AAI_CBC.
12097 + * @ivsize: initialization vector size
12098 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12099 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12100 + */
12101 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12102 +                                    unsigned int ivsize, const bool is_rfc3686,
12103 +                                    const u32 ctx1_iv_off)
12104 +{
12105 +       u32 *key_jump_cmd, geniv;
12106 +
12107 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12108 +       /* Skip if already shared */
12109 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12110 +                                  JUMP_COND_SHRD);
12111 +
12112 +       /* Load class1 key only */
12113 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12114 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12115 +
12116 +       /* Load Nonce into CONTEXT1 reg */
12117 +       if (is_rfc3686) {
12118 +               const u8 *nonce = cdata->key_virt + cdata->keylen;
12119 +
12120 +               append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12121 +                                  LDST_CLASS_IND_CCB |
12122 +                                  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12123 +               append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12124 +                           MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12125 +                           (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12126 +       }
12127 +       set_jump_tgt_here(desc, key_jump_cmd);
12128 +
12129 +       /* Generate IV */
12130 +       geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12131 +               NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12132 +               (ivsize << NFIFOENTRY_DLEN_SHIFT);
12133 +       append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12134 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12135 +       append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12136 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12137 +                   MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12138 +                   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12139 +       append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12140 +
12141 +       /* Copy generated IV to memory */
12142 +       append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12143 +                        LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12144 +
12145 +       /* Load Counter into CONTEXT1 reg */
12146 +       if (is_rfc3686)
12147 +               append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12148 +                                    LDST_SRCDST_BYTE_CONTEXT |
12149 +                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12150 +                                     LDST_OFFSET_SHIFT));
12151 +
12152 +       if (ctx1_iv_off)
12153 +               append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12154 +                           (1 << JUMP_OFFSET_SHIFT));
12155 +
12156 +       /* Load operation */
12157 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12158 +                        OP_ALG_ENCRYPT);
12159 +
12160 +       /* Perform operation */
12161 +       ablkcipher_append_src_dst(desc);
12162 +
12163 +#ifdef DEBUG
12164 +       print_hex_dump(KERN_ERR,
12165 +                      "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12166 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12167 +#endif
12168 +}
12169 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
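
/*
 * Editor's illustrative sketch, not part of the patch: the givencap variant
 * generates the IV in hardware (the NFIFO pad/RND entry above) and then
 * seq-stores it at the head of the output sequence, so the caller arranges
 * the output S/G list to receive ivsize bytes of IV ahead of the
 * ciphertext. Construction itself mirrors the plain encap case:
 */
static void example_cbc_aes_givenc_shdesc(u32 *desc, void *key,
					  unsigned int keylen)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.key_virt = key,
		.keylen = keylen,
	};

	cnstr_shdsc_ablkcipher_givencap(desc, &cdata, AES_BLOCK_SIZE, false, 0);
}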
12170 +
12171 +/**
12172 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12173 + *                                    descriptor
12174 + * @desc: pointer to buffer used for descriptor construction
12175 + * @cdata: pointer to block cipher transform definitions
12176 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12177 + */
12178 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12179 +{
12180 +       __be64 sector_size = cpu_to_be64(512);
12181 +       u32 *key_jump_cmd;
12182 +
12183 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12184 +       /* Skip if already shared */
12185 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12186 +                                  JUMP_COND_SHRD);
12187 +
12188 +       /* Load class1 keys only */
12189 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12190 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12191 +
12192 +       /* Load sector size with index 40 bytes (0x28) */
12193 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12194 +                          LDST_SRCDST_BYTE_CONTEXT |
12195 +                          (0x28 << LDST_OFFSET_SHIFT));
12196 +
12197 +       set_jump_tgt_here(desc, key_jump_cmd);
12198 +
12199 +       /*
12200 +        * Create a sequence for loading the sector index:
12201 +        * the upper 8B of the IV are used as the sector index,
12202 +        * the lower 8B of the IV are discarded.
12203 +        */
12204 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12205 +                       (0x20 << LDST_OFFSET_SHIFT));
12206 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12207 +
12208 +       /* Load operation */
12209 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12210 +                        OP_ALG_ENCRYPT);
12211 +
12212 +       /* Perform operation */
12213 +       ablkcipher_append_src_dst(desc);
12214 +
12215 +#ifdef DEBUG
12216 +       print_hex_dump(KERN_ERR,
12217 +                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12218 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12219 +#endif
12220 +}
12221 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
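
/*
 * Editor's illustrative sketch, not part of the patch: the XTS descriptors
 * consume a 16-byte IV whose upper 8 bytes are loaded at CONTEXT1 offset
 * 0x20 as the sector index and whose lower 8 bytes are skipped. Assuming
 * the same big-endian convention as the hard-coded 512-byte sector size, a
 * caller can derive the IV from a sector number like this:
 */
static void example_xts_fill_iv(u8 iv[16], u64 sector)
{
	__be64 index = cpu_to_be64(sector);

	memcpy(iv, &index, 8);	/* upper 8B: sector index */
	memset(iv + 8, 0, 8);	/* lower 8B: discarded by the descriptor */
}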
12222 +
12223 +/**
12224 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12225 + *                                    descriptor
12226 + * @desc: pointer to buffer used for descriptor construction
12227 + * @cdata: pointer to block cipher transform definitions
12228 + *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12229 + */
12230 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12231 +{
12232 +       __be64 sector_size = cpu_to_be64(512);
12233 +       u32 *key_jump_cmd;
12234 +
12235 +       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12236 +       /* Skip if already shared */
12237 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12238 +                                  JUMP_COND_SHRD);
12239 +
12240 +       /* Load class1 keys only */
12241 +       append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12242 +                         cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12243 +
12244 +       /* Load sector size with index 40 bytes (0x28) */
12245 +       append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12246 +                          LDST_SRCDST_BYTE_CONTEXT |
12247 +                          (0x28 << LDST_OFFSET_SHIFT));
12248 +
12249 +       set_jump_tgt_here(desc, key_jump_cmd);
12250 +
12251 +       /*
12252 +        * Create a sequence for loading the sector index:
12253 +        * the upper 8B of the IV are used as the sector index,
12254 +        * the lower 8B of the IV are discarded.
12255 +        */
12256 +       append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12257 +                       (0x20 << LDST_OFFSET_SHIFT));
12258 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12259 +
12260 +       /* Load operation */
12261 +       append_dec_op1(desc, cdata->algtype);
12262 +
12263 +       /* Perform operation */
12264 +       ablkcipher_append_src_dst(desc);
12265 +
12266 +#ifdef DEBUG
12267 +       print_hex_dump(KERN_ERR,
12268 +                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12269 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12270 +#endif
12271 +}
12272 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12273 +
12274 +MODULE_LICENSE("GPL");
12275 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12276 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12277 --- /dev/null
12278 +++ b/drivers/crypto/caam/caamalg_desc.h
12279 @@ -0,0 +1,127 @@
12280 +/*
12281 + * Shared descriptors for aead, ablkcipher algorithms
12282 + *
12283 + * Copyright 2016 NXP
12284 + */
12285 +
12286 +#ifndef _CAAMALG_DESC_H_
12287 +#define _CAAMALG_DESC_H_
12288 +
12289 +/* length of descriptors text */
12290 +#define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
12291 +#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12292 +#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12293 +#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12294 +#define DESC_QI_AEAD_ENC_LEN           (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12295 +#define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12296 +#define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12297 +
12298 +#define DESC_TLS_BASE                  (4 * CAAM_CMD_SZ)
12299 +#define DESC_TLS10_ENC_LEN             (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12300 +
12301 +/* Note: Nonce is counted in cdata.keylen */
12302 +#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
12303 +
12304 +#define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
12305 +#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12306 +#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12307 +
12308 +#define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
12309 +#define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12310 +#define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12311 +#define DESC_QI_GCM_ENC_LEN            (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12312 +#define DESC_QI_GCM_DEC_LEN            (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12313 +
12314 +#define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
12315 +#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12316 +#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12317 +#define DESC_QI_RFC4106_ENC_LEN                (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12318 +#define DESC_QI_RFC4106_DEC_LEN                (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12319 +
12320 +#define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
12321 +#define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12322 +#define DESC_RFC4543_DEC_LEN           (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12323 +#define DESC_QI_RFC4543_ENC_LEN                (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12324 +#define DESC_QI_RFC4543_DEC_LEN                (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12325 +
12326 +#define DESC_ABLKCIPHER_BASE           (3 * CAAM_CMD_SZ)
12327 +#define DESC_ABLKCIPHER_ENC_LEN                (DESC_ABLKCIPHER_BASE + \
12328 +                                        20 * CAAM_CMD_SZ)
12329 +#define DESC_ABLKCIPHER_DEC_LEN                (DESC_ABLKCIPHER_BASE + \
12330 +                                        15 * CAAM_CMD_SZ)
12331 +
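/*
 * Editor's illustrative arithmetic, not part of the patch: the DESC_*_LEN
 * bounds above count descriptor text only. With CAAM_CMD_SZ = 4,
 * DESC_GCM_ENC_LEN = (3 + 16) * 4 = 76 bytes, so deciding whether a key can
 * be inlined reduces to a single comparison (CAAM_DESC_BYTES_MAX assumed to
 * be the shared-descriptor size limit from desc_constr.h):
 *
 *	key_inline = DESC_GCM_ENC_LEN + keylen <= CAAM_DESC_BYTES_MAX;
 */
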
12332 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12333 +                                unsigned int icvsize, int era);
12334 +
12335 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12336 +                                unsigned int icvsize, int era);
12337 +
12338 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12339 +                           struct alginfo *adata, unsigned int ivsize,
12340 +                           unsigned int icvsize, const bool is_rfc3686,
12341 +                           u32 *nonce, const u32 ctx1_iv_off,
12342 +                           const bool is_qi, int era);
12343 +
12344 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12345 +                           struct alginfo *adata, unsigned int ivsize,
12346 +                           unsigned int icvsize, const bool geniv,
12347 +                           const bool is_rfc3686, u32 *nonce,
12348 +                           const u32 ctx1_iv_off, const bool is_qi, int era);
12349 +
12350 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12351 +                              struct alginfo *adata, unsigned int ivsize,
12352 +                              unsigned int icvsize, const bool is_rfc3686,
12353 +                              u32 *nonce, const u32 ctx1_iv_off,
12354 +                              const bool is_qi, int era);
12355 +
12356 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12357 +                          struct alginfo *adata, unsigned int assoclen,
12358 +                          unsigned int ivsize, unsigned int authsize,
12359 +                          unsigned int blocksize, int era);
12360 +
12361 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12362 +                          struct alginfo *adata, unsigned int assoclen,
12363 +                          unsigned int ivsize, unsigned int authsize,
12364 +                          unsigned int blocksize, int era);
12365 +
12366 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12367 +                          unsigned int ivsize, unsigned int icvsize,
12368 +                          const bool is_qi);
12369 +
12370 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12371 +                          unsigned int ivsize, unsigned int icvsize,
12372 +                          const bool is_qi);
12373 +
12374 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12375 +                              unsigned int ivsize, unsigned int icvsize,
12376 +                              const bool is_qi);
12377 +
12378 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12379 +                              unsigned int ivsize, unsigned int icvsize,
12380 +                              const bool is_qi);
12381 +
12382 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12383 +                              unsigned int ivsize, unsigned int icvsize,
12384 +                              const bool is_qi);
12385 +
12386 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12387 +                              unsigned int ivsize, unsigned int icvsize,
12388 +                              const bool is_qi);
12389 +
12390 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12391 +                                 unsigned int ivsize, const bool is_rfc3686,
12392 +                                 const u32 ctx1_iv_off);
12393 +
12394 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12395 +                                 unsigned int ivsize, const bool is_rfc3686,
12396 +                                 const u32 ctx1_iv_off);
12397 +
12398 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12399 +                                    unsigned int ivsize, const bool is_rfc3686,
12400 +                                    const u32 ctx1_iv_off);
12401 +
12402 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12403 +
12404 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12405 +
12406 +#endif /* _CAAMALG_DESC_H_ */
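The *_LEN macros above are byte lengths expressed in multiples of CAAM_CMD_SZ, the 4-byte CAAM command word. Callers budget them against the 64-word descriptor buffer via desc_inline_query() (declared in desc_constr.h), which reports per data item whether it can be inlined into the shared descriptor or must be referenced by DMA address. A minimal sketch, with illustrative key lengths:

	/* Sketch only: the key lengths here are illustrative. */
	unsigned int data_len[2] = { 64, 32 };	/* padded split auth key, AES-256 key */
	u32 inl_mask;

	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN, DESC_JOB_IO_LEN,
			      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;	/* descriptor cannot fit even with keys via DMA */

	/* bit 0 set: auth key may be inlined; bit 1 set: cipher key may be inlined */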
12407 --- /dev/null
12408 +++ b/drivers/crypto/caam/caamalg_qi.c
12409 @@ -0,0 +1,2929 @@
12410 +/*
12411 + * Freescale CAAM support for the crypto API over the QI backend.
12412 + * Based on caamalg.c
12413 + *
12414 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12415 + * Copyright 2016-2017 NXP
12416 + */
12417 +
12418 +#include "compat.h"
12419 +#include "ctrl.h"
12420 +#include "regs.h"
12421 +#include "intern.h"
12422 +#include "desc_constr.h"
12423 +#include "error.h"
12424 +#include "sg_sw_qm.h"
12425 +#include "key_gen.h"
12426 +#include "qi.h"
12427 +#include "jr.h"
12428 +#include "caamalg_desc.h"
12429 +
12430 +/*
12431 + * crypto alg
12432 + */
12433 +#define CAAM_CRA_PRIORITY              2000
12434 +/* max key is the sum of AES_MAX_KEY_SIZE and the max split key size */
12435 +#define CAAM_MAX_KEY_SIZE              (AES_MAX_KEY_SIZE + \
12436 +                                        SHA512_DIGEST_SIZE * 2)
12437 +
12438 +#define DESC_MAX_USED_BYTES            (DESC_QI_AEAD_GIVENC_LEN + \
12439 +                                        CAAM_MAX_KEY_SIZE)
12440 +#define DESC_MAX_USED_LEN              (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12441 +
12442 +struct caam_alg_entry {
12443 +       int class1_alg_type;
12444 +       int class2_alg_type;
12445 +       bool rfc3686;
12446 +       bool geniv;
12447 +};
12448 +
12449 +struct caam_aead_alg {
12450 +       struct aead_alg aead;
12451 +       struct caam_alg_entry caam;
12452 +       bool registered;
12453 +};
12454 +
12455 +/*
12456 + * per-session context
12457 + */
12458 +struct caam_ctx {
12459 +       struct device *jrdev;
12460 +       u32 sh_desc_enc[DESC_MAX_USED_LEN];
12461 +       u32 sh_desc_dec[DESC_MAX_USED_LEN];
12462 +       u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12463 +       u8 key[CAAM_MAX_KEY_SIZE];
12464 +       dma_addr_t key_dma;
12465 +       struct alginfo adata;
12466 +       struct alginfo cdata;
12467 +       unsigned int authsize;
12468 +       struct device *qidev;
12469 +       spinlock_t lock;        /* Protects multiple init of driver context */
12470 +       struct caam_drv_ctx *drv_ctx[NUM_OP];
12471 +};
12472 +
12473 +static int aead_set_sh_desc(struct crypto_aead *aead)
12474 +{
12475 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12476 +                                                typeof(*alg), aead);
12477 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12478 +       unsigned int ivsize = crypto_aead_ivsize(aead);
12479 +       u32 ctx1_iv_off = 0;
12480 +       u32 *nonce = NULL;
12481 +       unsigned int data_len[2];
12482 +       u32 inl_mask;
12483 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12484 +                              OP_ALG_AAI_CTR_MOD128);
12485 +       const bool is_rfc3686 = alg->caam.rfc3686;
12486 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12487 +
12488 +       if (!ctx->cdata.keylen || !ctx->authsize)
12489 +               return 0;
12490 +
12491 +       /*
12492 +        * AES-CTR needs to load IV in CONTEXT1 reg
12493 +        * at an offset of 128 bits (16 bytes)
12494 +        * CONTEXT1[255:128] = IV
12495 +        */
12496 +       if (ctr_mode)
12497 +               ctx1_iv_off = 16;
12498 +
12499 +       /*
12500 +        * RFC3686 specific:
12501 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12502 +        */
12503 +       if (is_rfc3686) {
12504 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12505 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12506 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12507 +       }
12508 +
12509 +       data_len[0] = ctx->adata.keylen_pad;
12510 +       data_len[1] = ctx->cdata.keylen;
12511 +
12512 +       if (alg->caam.geniv)
12513 +               goto skip_enc;
12514 +
12515 +       /* aead_encrypt shared descriptor */
12516 +       if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12517 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12518 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12519 +                             ARRAY_SIZE(data_len)) < 0)
12520 +               return -EINVAL;
12521 +
12522 +       if (inl_mask & 1)
12523 +               ctx->adata.key_virt = ctx->key;
12524 +       else
12525 +               ctx->adata.key_dma = ctx->key_dma;
12526 +
12527 +       if (inl_mask & 2)
12528 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12529 +       else
12530 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12531 +
12532 +       ctx->adata.key_inline = !!(inl_mask & 1);
12533 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12534 +
12535 +       cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12536 +                              ivsize, ctx->authsize, is_rfc3686, nonce,
12537 +                              ctx1_iv_off, true, ctrlpriv->era);
12538 +
12539 +skip_enc:
12540 +       /* aead_decrypt shared descriptor */
12541 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12542 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12543 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12544 +                             ARRAY_SIZE(data_len)) < 0)
12545 +               return -EINVAL;
12546 +
12547 +       if (inl_mask & 1)
12548 +               ctx->adata.key_virt = ctx->key;
12549 +       else
12550 +               ctx->adata.key_dma = ctx->key_dma;
12551 +
12552 +       if (inl_mask & 2)
12553 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12554 +       else
12555 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12556 +
12557 +       ctx->adata.key_inline = !!(inl_mask & 1);
12558 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12559 +
12560 +       cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12561 +                              ivsize, ctx->authsize, alg->caam.geniv,
12562 +                              is_rfc3686, nonce, ctx1_iv_off, true,
12563 +                              ctrlpriv->era);
12564 +
12565 +       if (!alg->caam.geniv)
12566 +               goto skip_givenc;
12567 +
12568 +       /* aead_givencrypt shared descriptor */
12569 +       if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12570 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12571 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
12572 +                             ARRAY_SIZE(data_len)) < 0)
12573 +               return -EINVAL;
12574 +
12575 +       if (inl_mask & 1)
12576 +               ctx->adata.key_virt = ctx->key;
12577 +       else
12578 +               ctx->adata.key_dma = ctx->key_dma;
12579 +
12580 +       if (inl_mask & 2)
12581 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12582 +       else
12583 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12584 +
12585 +       ctx->adata.key_inline = !!(inl_mask & 1);
12586 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12587 +
12588 +       cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12589 +                                 ivsize, ctx->authsize, is_rfc3686, nonce,
12590 +                                 ctx1_iv_off, true, ctrlpriv->era);
12591 +
12592 +skip_givenc:
12593 +       return 0;
12594 +}
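The inline-or-DMA key placement above is repeated verbatim for the encrypt, decrypt and givencrypt descriptors. Written once as a helper, the step would look like this sketch (the helper is hypothetical, not part of the patch):

	/* Apply desc_inline_query()'s verdict: bit 0 = split auth key,
	 * bit 1 = cipher key; inlined keys go by virtual address, the
	 * rest by DMA address.
	 */
	static void set_key_locations(struct caam_ctx *ctx, u32 inl_mask)
	{
		if (inl_mask & 1)
			ctx->adata.key_virt = ctx->key;
		else
			ctx->adata.key_dma = ctx->key_dma;

		if (inl_mask & 2)
			ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
		else
			ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

		ctx->adata.key_inline = !!(inl_mask & 1);
		ctx->cdata.key_inline = !!(inl_mask & 2);
	}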
12595 +
12596 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12597 +{
12598 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12599 +
12600 +       ctx->authsize = authsize;
12601 +       aead_set_sh_desc(authenc);
12602 +
12603 +       return 0;
12604 +}
12605 +
12606 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12607 +                      unsigned int keylen)
12608 +{
12609 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
12610 +       struct device *jrdev = ctx->jrdev;
12611 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12612 +       struct crypto_authenc_keys keys;
12613 +       int ret = 0;
12614 +
12615 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12616 +               goto badkey;
12617 +
12618 +#ifdef DEBUG
12619 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12620 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12621 +               keys.authkeylen);
12622 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12623 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12624 +#endif
12625 +
12626 +       /*
12627 +        * If DKP is supported, use it in the shared descriptor to generate
12628 +        * the split key.
12629 +        */
12630 +       if (ctrlpriv->era >= 6) {
12631 +               ctx->adata.keylen = keys.authkeylen;
12632 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12633 +                                                     OP_ALG_ALGSEL_MASK);
12634 +
12635 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12636 +                       goto badkey;
12637 +
12638 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
12639 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12640 +                      keys.enckeylen);
12641 +               dma_sync_single_for_device(jrdev, ctx->key_dma,
12642 +                                          ctx->adata.keylen_pad +
12643 +                                          keys.enckeylen, DMA_TO_DEVICE);
12644 +               goto skip_split_key;
12645 +       }
12646 +
12647 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12648 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12649 +                           keys.enckeylen);
12650 +       if (ret)
12651 +               goto badkey;
12652 +
12653 +       /* append encryption key to auth split key */
12654 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12655 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12656 +                                  keys.enckeylen, DMA_TO_DEVICE);
12657 +#ifdef DEBUG
12658 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12659 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12660 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12661 +#endif
12662 +
12663 +skip_split_key:
12664 +       ctx->cdata.keylen = keys.enckeylen;
12665 +
12666 +       ret = aead_set_sh_desc(aead);
12667 +       if (ret)
12668 +               goto badkey;
12669 +
12670 +       /* Now update the driver contexts with the new shared descriptor */
12671 +       if (ctx->drv_ctx[ENCRYPT]) {
12672 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12673 +                                         ctx->sh_desc_enc);
12674 +               if (ret) {
12675 +                       dev_err(jrdev, "driver enc context update failed\n");
12676 +                       goto badkey;
12677 +               }
12678 +       }
12679 +
12680 +       if (ctx->drv_ctx[DECRYPT]) {
12681 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12682 +                                         ctx->sh_desc_dec);
12683 +               if (ret) {
12684 +                       dev_err(jrdev, "driver dec context update failed\n");
12685 +                       goto badkey;
12686 +               }
12687 +       }
12688 +
12689 +       return ret;
12690 +badkey:
12691 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12692 +       return -EINVAL;
12693 +}
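Both the DKP and the gen_split_key() paths above leave ctx->key with the same layout, which is what the adata/cdata offsets used throughout aead_set_sh_desc() rely on. Roughly, with illustrative sizes for hmac(sha256) plus AES-256:

	/*
	 * ctx->key after aead_setkey() (illustrative sizes):
	 *
	 *   offset 0                  offset adata.keylen_pad
	 *   +-------------------------+------------------------+
	 *   | split auth key (padded) | encryption key (32 B)  |
	 *   +-------------------------+------------------------+
	 *
	 * adata.key_{virt,dma} point at offset 0;
	 * cdata.key_{virt,dma} point at offset adata.keylen_pad.
	 */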
12694 +
12695 +static int tls_set_sh_desc(struct crypto_aead *tls)
12696 +{
12697 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12698 +       unsigned int ivsize = crypto_aead_ivsize(tls);
12699 +       unsigned int blocksize = crypto_aead_blocksize(tls);
12700 +       unsigned int assoclen = 13; /* TLS: seq(8) + type(1) + ver(2) + len(2) */
12701 +       unsigned int data_len[2];
12702 +       u32 inl_mask;
12703 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12704 +
12705 +       if (!ctx->cdata.keylen || !ctx->authsize)
12706 +               return 0;
12707 +
12708 +       /*
12709 +        * TLS 1.0 encrypt shared descriptor
12710 +        * Job Descriptor and Shared Descriptor
12711 +        * must fit into the 64-word Descriptor h/w Buffer
12712 +        */
12713 +       data_len[0] = ctx->adata.keylen_pad;
12714 +       data_len[1] = ctx->cdata.keylen;
12715 +
12716 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12717 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
12718 +               return -EINVAL;
12719 +
12720 +       if (inl_mask & 1)
12721 +               ctx->adata.key_virt = ctx->key;
12722 +       else
12723 +               ctx->adata.key_dma = ctx->key_dma;
12724 +
12725 +       if (inl_mask & 2)
12726 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12727 +       else
12728 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12729 +
12730 +       ctx->adata.key_inline = !!(inl_mask & 1);
12731 +       ctx->cdata.key_inline = !!(inl_mask & 2);
12732 +
12733 +       cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12734 +                             assoclen, ivsize, ctx->authsize, blocksize,
12735 +                             ctrlpriv->era);
12736 +
12737 +       /*
12738 +        * TLS 1.0 decrypt shared descriptor
12739 +        * Keys do not fit inline, regardless of algorithms used
12740 +        */
12741 +       ctx->adata.key_inline = false;
12742 +       ctx->adata.key_dma = ctx->key_dma;
12743 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12744 +
12745 +       cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12746 +                             assoclen, ivsize, ctx->authsize, blocksize,
12747 +                             ctrlpriv->era);
12748 +
12749 +       return 0;
12750 +}
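A quick check of the budget referred to in the comment above, using the 4-byte CAAM_CMD_SZ:

	/* Worked example:
	 *   DESC_TLS10_ENC_LEN = (4 + 29) * 4 = 132 bytes = 33 words
	 *   descriptor buffer  = 64 words
	 *   left for the job descriptor plus inlined keys: 64 - 33 = 31 words
	 * which is why desc_inline_query() may demote either key to DMA.
	 */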
12751 +
12752 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12753 +{
12754 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12755 +
12756 +       ctx->authsize = authsize;
12757 +       tls_set_sh_desc(tls);
12758 +
12759 +       return 0;
12760 +}
12761 +
12762 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12763 +                     unsigned int keylen)
12764 +{
12765 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
12766 +       struct device *jrdev = ctx->jrdev;
12767 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12768 +       struct crypto_authenc_keys keys;
12769 +       int ret = 0;
12770 +
12771 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12772 +               goto badkey;
12773 +
12774 +#ifdef DEBUG
12775 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12776 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
12777 +               keys.authkeylen);
12778 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12779 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12780 +#endif
12781 +
12782 +       /*
12783 +        * If DKP is supported, use it in the shared descriptor to generate
12784 +        * the split key.
12785 +        */
12786 +       if (ctrlpriv->era >= 6) {
12787 +               ctx->adata.keylen = keys.authkeylen;
12788 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12789 +                                                     OP_ALG_ALGSEL_MASK);
12790 +
12791 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12792 +                       goto badkey;
12793 +
12794 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
12795 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12796 +                      keys.enckeylen);
12797 +               dma_sync_single_for_device(jrdev, ctx->key_dma,
12798 +                                          ctx->adata.keylen_pad +
12799 +                                          keys.enckeylen, DMA_TO_DEVICE);
12800 +               goto skip_split_key;
12801 +       }
12802 +
12803 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12804 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
12805 +                           keys.enckeylen);
12806 +       if (ret)
12807 +               goto badkey;
12808 +
12809 +       /* append encryption key to auth split key */
12810 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12811 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12812 +                                  keys.enckeylen, DMA_TO_DEVICE);
12813 +
12814 +#ifdef DEBUG
12815 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12816 +               ctx->adata.keylen, ctx->adata.keylen_pad);
12817 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12818 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12819 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
12820 +#endif
12821 +
12822 +skip_split_key:
12823 +       ctx->cdata.keylen = keys.enckeylen;
12824 +
12825 +       ret = tls_set_sh_desc(tls);
12826 +       if (ret)
12827 +               goto badkey;
12828 +
12829 +       /* Now update the driver contexts with the new shared descriptor */
12830 +       if (ctx->drv_ctx[ENCRYPT]) {
12831 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12832 +                                         ctx->sh_desc_enc);
12833 +               if (ret) {
12834 +                       dev_err(jrdev, "driver enc context update failed\n");
12835 +                       goto badkey;
12836 +               }
12837 +       }
12838 +
12839 +       if (ctx->drv_ctx[DECRYPT]) {
12840 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12841 +                                         ctx->sh_desc_dec);
12842 +               if (ret) {
12843 +                       dev_err(jrdev, "driver dec context update failed\n");
12844 +                       goto badkey;
12845 +               }
12846 +       }
12847 +
12848 +       return ret;
12849 +badkey:
12850 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12851 +       return -EINVAL;
12852 +}
12853 +
12854 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12855 +                            const u8 *key, unsigned int keylen)
12856 +{
12857 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12858 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12859 +       const char *alg_name = crypto_tfm_alg_name(tfm);
12860 +       struct device *jrdev = ctx->jrdev;
12861 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12862 +       u32 ctx1_iv_off = 0;
12863 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12864 +                              OP_ALG_AAI_CTR_MOD128);
12865 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12866 +       int ret = 0;
12867 +
12868 +       memcpy(ctx->key, key, keylen);
12869 +#ifdef DEBUG
12870 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12871 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12872 +#endif
12873 +       /*
12874 +        * AES-CTR needs to load IV in CONTEXT1 reg
12875 +        * at an offset of 128bits (16bytes)
12876 +        * at an offset of 128 bits (16 bytes)
12877 +        */
12878 +       if (ctr_mode)
12879 +               ctx1_iv_off = 16;
12880 +
12881 +       /*
12882 +        * RFC3686 specific:
12883 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12884 +        *      | *key = {KEY, NONCE}
12885 +        */
12886 +       if (is_rfc3686) {
12887 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12888 +               keylen -= CTR_RFC3686_NONCE_SIZE;
12889 +       }
12890 +
12891 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12892 +       ctx->cdata.keylen = keylen;
12893 +       ctx->cdata.key_virt = ctx->key;
12894 +       ctx->cdata.key_inline = true;
12895 +
12896 +       /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12897 +       cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12898 +                                    is_rfc3686, ctx1_iv_off);
12899 +       cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12900 +                                    is_rfc3686, ctx1_iv_off);
12901 +       cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12902 +                                       ivsize, is_rfc3686, ctx1_iv_off);
12903 +
12904 +       /* Now update the driver contexts with the new shared descriptor */
12905 +       if (ctx->drv_ctx[ENCRYPT]) {
12906 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12907 +                                         ctx->sh_desc_enc);
12908 +               if (ret) {
12909 +                       dev_err(jrdev, "driver enc context update failed\n");
12910 +                       goto badkey;
12911 +               }
12912 +       }
12913 +
12914 +       if (ctx->drv_ctx[DECRYPT]) {
12915 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12916 +                                         ctx->sh_desc_dec);
12917 +               if (ret) {
12918 +                       dev_err(jrdev, "driver dec context update failed\n");
12919 +                       goto badkey;
12920 +               }
12921 +       }
12922 +
12923 +       if (ctx->drv_ctx[GIVENCRYPT]) {
12924 +               ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12925 +                                         ctx->sh_desc_givenc);
12926 +               if (ret) {
12927 +                       dev_err(jrdev, "driver givenc context update failed\n");
12928 +                       goto badkey;
12929 +               }
12930 +       }
12931 +
12932 +       return ret;
12933 +badkey:
12934 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12935 +       return -EINVAL;
12936 +}
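For rfc3686(ctr(aes)) the crypto API hands setkey() the nonce appended to the key, which is what the keylen adjustment above strips off. A worked example with AES-128 and the 4-byte RFC 3686 nonce:

	/* setkey() receives 20 bytes for rfc3686(ctr(aes)) with AES-128:
	 *   key[0..15]  AES key  -> cdata.keylen becomes 16
	 *   key[16..19] nonce    -> loaded into CONTEXT1 before the IV
	 * hence ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE = 20 above.
	 */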
12937 +
12938 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12939 +                                const u8 *key, unsigned int keylen)
12940 +{
12941 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12942 +       struct device *jrdev = ctx->jrdev;
12943 +       int ret = 0;
12944 +
12945 +       if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12946 +               crypto_ablkcipher_set_flags(ablkcipher,
12947 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
12948 +               dev_err(jrdev, "key size mismatch\n");
12949 +               return -EINVAL;
12950 +       }
12951 +
12952 +       memcpy(ctx->key, key, keylen);
12953 +       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12954 +       ctx->cdata.keylen = keylen;
12955 +       ctx->cdata.key_virt = ctx->key;
12956 +       ctx->cdata.key_inline = true;
12957 +
12958 +       /* xts ablkcipher encrypt, decrypt shared descriptors */
12959 +       cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12960 +       cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12961 +
12962 +       /* Now update the driver contexts with the new shared descriptor */
12963 +       if (ctx->drv_ctx[ENCRYPT]) {
12964 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12965 +                                         ctx->sh_desc_enc);
12966 +               if (ret) {
12967 +                       dev_err(jrdev, "driver enc context update failed\n");
12968 +                       goto badkey;
12969 +               }
12970 +       }
12971 +
12972 +       if (ctx->drv_ctx[DECRYPT]) {
12973 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12974 +                                         ctx->sh_desc_dec);
12975 +               if (ret) {
12976 +                       dev_err(jrdev, "driver dec context update failed\n");
12977 +                       goto badkey;
12978 +               }
12979 +       }
12980 +
12981 +       return ret;
12982 +badkey:
12983 +       crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12984 +       return 0;
12985 +       return -EINVAL;
12986 +
12987 +/*
12988 + * aead_edesc - s/w-extended aead descriptor
12989 + * @src_nents: number of segments in input scatterlist
12990 + * @dst_nents: number of segments in output scatterlist
12991 + * @iv_dma: dma address of iv for checking continuity and link table
12992 + * @qm_sg_bytes: length of dma mapped h/w link table
12993 + * @qm_sg_dma: bus physical mapped address of h/w link table
12994 + * @assoclen: associated data length, in CAAM endianness
12995 + * @assoclen_dma: bus physical mapped address of req->assoclen
12996 + * @drv_req: driver-specific request structure
12997 + * @sgt: the h/w link table
12998 + */
12999 +struct aead_edesc {
13000 +       int src_nents;
13001 +       int dst_nents;
13002 +       dma_addr_t iv_dma;
13003 +       int qm_sg_bytes;
13004 +       dma_addr_t qm_sg_dma;
13005 +       unsigned int assoclen;
13006 +       dma_addr_t assoclen_dma;
13007 +       struct caam_drv_req drv_req;
13008 +#define CAAM_QI_MAX_AEAD_SG                                            \
13009 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
13010 +        sizeof(struct qm_sg_entry))
13011 +       struct qm_sg_entry sgt[0];
13012 +};
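CAAM_QI_MAX_AEAD_SG is simply how many 16-byte qm_sg_entry slots remain in a qi_cache_alloc() object once the edesc header is accounted for. An illustrative estimate only, assuming the 768-byte CAAM_QI_MEMCACHE_SIZE from qi.h and a 64-bit build (the header size below is an approximation, not a guaranteed value):

	/* Illustrative arithmetic:
	 *   CAAM_QI_MEMCACHE_SIZE            = 768 bytes
	 *   offsetof(struct aead_edesc, sgt) ~ 104 bytes (64-bit build)
	 *   sizeof(struct qm_sg_entry)       = 16 bytes
	 *   CAAM_QI_MAX_AEAD_SG              ~ (768 - 104) / 16 = 41 entries
	 */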
13013 +
13014 +/*
13015 + * tls_edesc - s/w-extended tls descriptor
13016 + * @src_nents: number of segments in input scatterlist
13017 + * @dst_nents: number of segments in output scatterlist
13018 + * @iv_dma: dma address of iv for checking continuity and link table
13019 + * @qm_sg_bytes: length of dma mapped h/w link table
13020 + * @qm_sg_dma: bus physical mapped address of h/w link table
13021 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
13022 + * @drv_req: driver-specific request structure
13023 + * @sgt: the h/w link table
13024 + */
13025 +struct tls_edesc {
13026 +       int src_nents;
13027 +       int dst_nents;
13028 +       dma_addr_t iv_dma;
13029 +       int qm_sg_bytes;
13030 +       dma_addr_t qm_sg_dma;
13031 +       struct scatterlist tmp[2];
13032 +       struct scatterlist *dst;
13033 +       struct caam_drv_req drv_req;
13034 +       struct qm_sg_entry sgt[0];
13035 +};
13036 +
13037 +/*
13038 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
13039 + * @src_nents: number of segments in input scatterlist
13040 + * @dst_nents: number of segments in output scatterlist
13041 + * @iv_dma: dma address of iv for checking continuity and link table
13042 + * @qm_sg_bytes: length of dma mapped h/w link table
13043 + * @qm_sg_dma: bus physical mapped address of h/w link table
13044 + * @drv_req: driver-specific request structure
13045 + * @sgt: the h/w link table
13046 + */
13047 +struct ablkcipher_edesc {
13048 +       int src_nents;
13049 +       int dst_nents;
13050 +       dma_addr_t iv_dma;
13051 +       int qm_sg_bytes;
13052 +       dma_addr_t qm_sg_dma;
13053 +       struct caam_drv_req drv_req;
13054 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
13055 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
13056 +        sizeof(struct qm_sg_entry))
13057 +       struct qm_sg_entry sgt[0];
13058 +};
13059 +
13060 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
13061 +                                       enum optype type)
13062 +{
13063 +       /*
13064 +        * This function is called on the fast path with values of 'type'
13065 +        * known at compile time. Invalid arguments are not expected and
13066 +        * thus no checks are made.
13067 +        */
13068 +       struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
13069 +       u32 *desc;
13070 +
13071 +       if (unlikely(!drv_ctx)) {
13072 +               spin_lock(&ctx->lock);
13073 +
13074 +               /* Read again to check if some other core initialized drv_ctx */
13075 +               drv_ctx = ctx->drv_ctx[type];
13076 +               if (!drv_ctx) {
13077 +                       int cpu;
13078 +
13079 +                       if (type == ENCRYPT)
13080 +                               desc = ctx->sh_desc_enc;
13081 +                       else if (type == DECRYPT)
13082 +                               desc = ctx->sh_desc_dec;
13083 +                       else /* (type == GIVENCRYPT) */
13084 +                               desc = ctx->sh_desc_givenc;
13085 +
13086 +                       cpu = smp_processor_id();
13087 +                       drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
13088 +                       if (likely(!IS_ERR_OR_NULL(drv_ctx)))
13089 +                               drv_ctx->op_type = type;
13090 +
13091 +                       ctx->drv_ctx[type] = drv_ctx;
13092 +               }
13093 +
13094 +               spin_unlock(&ctx->lock);
13095 +       }
13096 +
13097 +       return drv_ctx;
13098 +}
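get_drv_ctx() is a double-checked locking pattern: the unlocked read keeps the common case lock-free, and the re-read under ctx->lock makes the one-time initialization race-free. Reduced to its bones:

	/* The shape of the pattern above, in miniature: */
	obj = slot;			/* fast path, no lock taken   */
	if (unlikely(!obj)) {
		spin_lock(&lock);
		obj = slot;		/* did another core beat us?  */
		if (!obj)
			slot = obj = create();
		spin_unlock(&lock);
	}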
13099 +
13100 +static void caam_unmap(struct device *dev, struct scatterlist *src,
13101 +                      struct scatterlist *dst, int src_nents,
13102 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
13103 +                      enum optype op_type, dma_addr_t qm_sg_dma,
13104 +                      int qm_sg_bytes)
13105 +{
13106 +       if (dst != src) {
13107 +               if (src_nents)
13108 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
13109 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
13110 +       } else {
13111 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
13112 +       }
13113 +
13114 +       if (iv_dma)
13115 +               dma_unmap_single(dev, iv_dma, ivsize,
13116 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
13117 +                                                        DMA_TO_DEVICE);
13118 +       if (qm_sg_bytes)
13119 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
13120 +}
13121 +
13122 +static void aead_unmap(struct device *dev,
13123 +                      struct aead_edesc *edesc,
13124 +                      struct aead_request *req)
13125 +{
13126 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13127 +       int ivsize = crypto_aead_ivsize(aead);
13128 +
13129 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13130 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13131 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
13132 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13133 +}
13134 +
13135 +static void tls_unmap(struct device *dev,
13136 +                     struct tls_edesc *edesc,
13137 +                     struct aead_request *req)
13138 +{
13139 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13140 +       int ivsize = crypto_aead_ivsize(aead);
13141 +
13142 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
13143 +                  edesc->dst_nents, edesc->iv_dma, ivsize,
13144 +                  edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
13145 +                  edesc->qm_sg_bytes);
13146 +}
13147 +
13148 +static void ablkcipher_unmap(struct device *dev,
13149 +                            struct ablkcipher_edesc *edesc,
13150 +                            struct ablkcipher_request *req)
13151 +{
13152 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13153 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13154 +
13155 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13156 +                  edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13157 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
13158 +}
13159 +
13160 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13161 +{
13162 +       struct device *qidev;
13163 +       struct aead_edesc *edesc;
13164 +       struct aead_request *aead_req = drv_req->app_ctx;
13165 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13166 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13167 +       int ecode = 0;
13168 +
13169 +       qidev = caam_ctx->qidev;
13170 +
13171 +       if (unlikely(status)) {
13172 +               caam_jr_strstatus(qidev, status);
13173 +               ecode = -EIO;
13174 +       }
13175 +
13176 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13177 +       aead_unmap(qidev, edesc, aead_req);
13178 +
13179 +       aead_request_complete(aead_req, ecode);
13180 +       qi_cache_free(edesc);
13181 +}
13182 +
13183 +/*
13184 + * allocate and map the aead extended descriptor
13185 + */
13186 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13187 +                                          bool encrypt)
13188 +{
13189 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13190 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13191 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13192 +                                                typeof(*alg), aead);
13193 +       struct device *qidev = ctx->qidev;
13194 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13195 +                      GFP_KERNEL : GFP_ATOMIC;
13196 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13197 +       struct aead_edesc *edesc;
13198 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13199 +       int ivsize = 0;
13200 +       unsigned int authsize = ctx->authsize;
13201 +       int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13202 +       int in_len, out_len;
13203 +       struct qm_sg_entry *sg_table, *fd_sgt;
13204 +       struct caam_drv_ctx *drv_ctx;
13205 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13206 +
13207 +       drv_ctx = get_drv_ctx(ctx, op_type);
13208 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13209 +               return (struct aead_edesc *)drv_ctx;
13210 +
13211 +       /* allocate space for base edesc and hw desc commands, link tables */
13212 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13213 +       if (unlikely(!edesc)) {
13214 +               dev_err(qidev, "could not allocate extended descriptor\n");
13215 +               return ERR_PTR(-ENOMEM);
13216 +       }
13217 +
13218 +       if (likely(req->src == req->dst)) {
13219 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13220 +                                            req->cryptlen +
13221 +                                            (encrypt ? authsize : 0));
13222 +               if (unlikely(src_nents < 0)) {
13223 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13224 +                               req->assoclen + req->cryptlen +
13225 +                               (encrypt ? authsize : 0));
13226 +                       qi_cache_free(edesc);
13227 +                       return ERR_PTR(src_nents);
13228 +               }
13229 +
13230 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13231 +                                             DMA_BIDIRECTIONAL);
13232 +               if (unlikely(!mapped_src_nents)) {
13233 +                       dev_err(qidev, "unable to map source\n");
13234 +                       qi_cache_free(edesc);
13235 +                       return ERR_PTR(-ENOMEM);
13236 +               }
13237 +       } else {
13238 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13239 +                                            req->cryptlen);
13240 +               if (unlikely(src_nents < 0)) {
13241 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13242 +                               req->assoclen + req->cryptlen);
13243 +                       qi_cache_free(edesc);
13244 +                       return ERR_PTR(src_nents);
13245 +               }
13246 +
13247 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13248 +                                            req->cryptlen +
13249 +                                            (encrypt ? authsize :
13250 +                                                       (-authsize)));
13251 +               if (unlikely(dst_nents < 0)) {
13252 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13253 +                               req->assoclen + req->cryptlen +
13254 +                               (encrypt ? authsize : (-authsize)));
13255 +                       qi_cache_free(edesc);
13256 +                       return ERR_PTR(dst_nents);
13257 +               }
13258 +
13259 +               if (src_nents) {
13260 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13261 +                                                     src_nents, DMA_TO_DEVICE);
13262 +                       if (unlikely(!mapped_src_nents)) {
13263 +                               dev_err(qidev, "unable to map source\n");
13264 +                               qi_cache_free(edesc);
13265 +                               return ERR_PTR(-ENOMEM);
13266 +                       }
13267 +               } else {
13268 +                       mapped_src_nents = 0;
13269 +               }
13270 +
13271 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13272 +                                             DMA_FROM_DEVICE);
13273 +               if (unlikely(!mapped_dst_nents)) {
13274 +                       dev_err(qidev, "unable to map destination\n");
13275 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13276 +                       qi_cache_free(edesc);
13277 +                       return ERR_PTR(-ENOMEM);
13278 +               }
13279 +       }
13280 +
13281 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13282 +               ivsize = crypto_aead_ivsize(aead);
13283 +               iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13284 +               if (dma_mapping_error(qidev, iv_dma)) {
13285 +                       dev_err(qidev, "unable to map IV\n");
13286 +                       caam_unmap(qidev, req->src, req->dst, src_nents,
13287 +                                  dst_nents, 0, 0, op_type, 0, 0);
13288 +                       qi_cache_free(edesc);
13289 +                       return ERR_PTR(-ENOMEM);
13290 +               }
13291 +       }
13292 +
13293 +       /*
13294 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13295 +        * Input is not contiguous.
13296 +        */
13297 +       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13298 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13299 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13300 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13301 +                       qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13302 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13303 +                          iv_dma, ivsize, op_type, 0, 0);
13304 +               qi_cache_free(edesc);
13305 +               return ERR_PTR(-ENOMEM);
13306 +       }
13307 +       sg_table = &edesc->sgt[0];
13308 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13309 +
13310 +       edesc->src_nents = src_nents;
13311 +       edesc->dst_nents = dst_nents;
13312 +       edesc->iv_dma = iv_dma;
13313 +       edesc->drv_req.app_ctx = req;
13314 +       edesc->drv_req.cbk = aead_done;
13315 +       edesc->drv_req.drv_ctx = drv_ctx;
13316 +
13317 +       edesc->assoclen = cpu_to_caam32(req->assoclen);
13318 +       edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13319 +                                            DMA_TO_DEVICE);
13320 +       if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13321 +               dev_err(qidev, "unable to map assoclen\n");
13322 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13323 +                          iv_dma, ivsize, op_type, 0, 0);
13324 +               qi_cache_free(edesc);
13325 +               return ERR_PTR(-ENOMEM);
13326 +       }
13327 +
13328 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13329 +       qm_sg_index++;
13330 +       if (ivsize) {
13331 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13332 +               qm_sg_index++;
13333 +       }
13334 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13335 +       qm_sg_index += mapped_src_nents;
13336 +
13337 +       if (mapped_dst_nents > 1)
13338 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13339 +                                qm_sg_index, 0);
13340 +
13341 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13342 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13343 +               dev_err(qidev, "unable to map S/G table\n");
13344 +               dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13345 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13346 +                          iv_dma, ivsize, op_type, 0, 0);
13347 +               qi_cache_free(edesc);
13348 +               return ERR_PTR(-ENOMEM);
13349 +       }
13350 +
13351 +       edesc->qm_sg_dma = qm_sg_dma;
13352 +       edesc->qm_sg_bytes = qm_sg_bytes;
13353 +
13354 +       out_len = req->assoclen + req->cryptlen +
13355 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
13356 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13357 +
13358 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13359 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13360 +
13361 +       if (req->dst == req->src) {
13362 +               if (mapped_src_nents == 1)
13363 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13364 +                                        out_len, 0);
13365 +               else
13366 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13367 +                                            (1 + !!ivsize) * sizeof(*sg_table),
13368 +                                            out_len, 0);
13369 +       } else if (mapped_dst_nents == 1) {
13370 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13371 +                                0);
13372 +       } else {
13373 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13374 +                                    qm_sg_index, out_len, 0);
13375 +       }
13376 +
13377 +       return edesc;
13378 +}
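The table built above, and the in_len/out_len arithmetic, follow a fixed layout. A summary sketch with one worked in_len:

	/* qm_sg layout produced by aead_edesc_alloc():
	 *   [0]               assoclen (4 bytes, CAAM endianness)
	 *   [1]               IV (present only when ivsize != 0)
	 *   [1 + !!ivsize..]  source segments
	 *   [tail]            destination segments (only when dst != src
	 *                     and dst maps to more than one segment)
	 *
	 * in_len = 4 + ivsize + req->assoclen + req->cryptlen;
	 * e.g. ivsize 12, assoclen 20, cryptlen 256 gives 292 bytes.
	 */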
13379 +
13380 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13381 +{
13382 +       struct aead_edesc *edesc;
13383 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13384 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13385 +       int ret;
13386 +
13387 +       if (unlikely(caam_congested))
13388 +               return -EAGAIN;
13389 +
13390 +       /* allocate extended descriptor */
13391 +       edesc = aead_edesc_alloc(req, encrypt);
13392 +       if (IS_ERR_OR_NULL(edesc))
13393 +               return PTR_ERR(edesc);
13394 +
13395 +       /* Create and submit job descriptor */
13396 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13397 +       if (!ret) {
13398 +               ret = -EINPROGRESS;
13399 +       } else {
13400 +               aead_unmap(ctx->qidev, edesc, req);
13401 +               qi_cache_free(edesc);
13402 +       }
13403 +
13404 +       return ret;
13405 +}
13406 +
13407 +static int aead_encrypt(struct aead_request *req)
13408 +{
13409 +       return aead_crypt(req, true);
13410 +}
13411 +
13412 +static int aead_decrypt(struct aead_request *req)
13413 +{
13414 +       return aead_crypt(req, false);
13415 +}
13416 +
13417 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13418 +{
13419 +       struct device *qidev;
13420 +       struct tls_edesc *edesc;
13421 +       struct aead_request *aead_req = drv_req->app_ctx;
13422 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13423 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13424 +       int ecode = 0;
13425 +
13426 +       qidev = caam_ctx->qidev;
13427 +
13428 +       if (unlikely(status)) {
13429 +               caam_jr_strstatus(qidev, status);
13430 +               ecode = -EIO;
13431 +       }
13432 +
13433 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13434 +       tls_unmap(qidev, edesc, aead_req);
13435 +
13436 +       aead_request_complete(aead_req, ecode);
13437 +       qi_cache_free(edesc);
13438 +}
13439 +
13440 +/*
13441 + * allocate and map the tls extended descriptor
13442 + */
13443 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13444 +{
13445 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13446 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13447 +       unsigned int blocksize = crypto_aead_blocksize(aead);
13448 +       unsigned int padsize, authsize;
13449 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13450 +                                                typeof(*alg), aead);
13451 +       struct device *qidev = ctx->qidev;
13452 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13453 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13454 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13455 +       struct tls_edesc *edesc;
13456 +       dma_addr_t qm_sg_dma, iv_dma = 0;
13457 +       int ivsize = 0;
13458 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13459 +       int in_len, out_len;
13460 +       struct qm_sg_entry *sg_table, *fd_sgt;
13461 +       struct caam_drv_ctx *drv_ctx;
13462 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13463 +       struct scatterlist *dst;
13464 +
13465 +       if (encrypt) {
13466 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13467 +                                       blocksize);
13468 +               authsize = ctx->authsize + padsize;
13469 +       } else {
13470 +               authsize = ctx->authsize;
13471 +       }
13472 +
13473 +       drv_ctx = get_drv_ctx(ctx, op_type);
13474 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13475 +               return (struct tls_edesc *)drv_ctx;
13476 +
13477 +       /* allocate space for base edesc and hw desc commands, link tables */
13478 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13479 +       if (unlikely(!edesc)) {
13480 +               dev_err(qidev, "could not allocate extended descriptor\n");
13481 +               return ERR_PTR(-ENOMEM);
13482 +       }
13483 +
13484 +       if (likely(req->src == req->dst)) {
13485 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13486 +                                            req->cryptlen +
13487 +                                            (encrypt ? authsize : 0));
13488 +               if (unlikely(src_nents < 0)) {
13489 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13490 +                               req->assoclen + req->cryptlen +
13491 +                               (encrypt ? authsize : 0));
13492 +                       qi_cache_free(edesc);
13493 +                       return ERR_PTR(src_nents);
13494 +               }
13495 +
13496 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13497 +                                             DMA_BIDIRECTIONAL);
13498 +               if (unlikely(!mapped_src_nents)) {
13499 +                       dev_err(qidev, "unable to map source\n");
13500 +                       qi_cache_free(edesc);
13501 +                       return ERR_PTR(-ENOMEM);
13502 +               }
13503 +               dst = req->dst;
13504 +       } else {
13505 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
13506 +                                            req->cryptlen);
13507 +               if (unlikely(src_nents < 0)) {
13508 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13509 +                               req->assoclen + req->cryptlen);
13510 +                       qi_cache_free(edesc);
13511 +                       return ERR_PTR(src_nents);
13512 +               }
13513 +
13514 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13515 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
13516 +                                            (encrypt ? authsize : 0));
13517 +               if (unlikely(dst_nents < 0)) {
13518 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13519 +                               req->cryptlen +
13520 +                               (encrypt ? authsize : 0));
13521 +                       qi_cache_free(edesc);
13522 +                       return ERR_PTR(dst_nents);
13523 +               }
13524 +
13525 +               if (src_nents) {
13526 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
13527 +                                                     src_nents, DMA_TO_DEVICE);
13528 +                       if (unlikely(!mapped_src_nents)) {
13529 +                               dev_err(qidev, "unable to map source\n");
13530 +                               qi_cache_free(edesc);
13531 +                               return ERR_PTR(-ENOMEM);
13532 +                       }
13533 +               } else {
13534 +                       mapped_src_nents = 0;
13535 +               }
13536 +
13537 +               mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13538 +                                             DMA_FROM_DEVICE);
13539 +               if (unlikely(!mapped_dst_nents)) {
13540 +                       dev_err(qidev, "unable to map destination\n");
13541 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13542 +                       qi_cache_free(edesc);
13543 +                       return ERR_PTR(-ENOMEM);
13544 +               }
13545 +       }
13546 +
13547 +       ivsize = crypto_aead_ivsize(aead);
13548 +       iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13549 +       if (dma_mapping_error(qidev, iv_dma)) {
13550 +               dev_err(qidev, "unable to map IV\n");
13551 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13552 +                          op_type, 0, 0);
13553 +               qi_cache_free(edesc);
13554 +               return ERR_PTR(-ENOMEM);
13555 +       }
13556 +
13557 +       /*
13558 +        * Create S/G table: IV, src, dst.
13559 +        * Input is not contiguous.
13560 +        */
13561 +       qm_sg_ents = 1 + mapped_src_nents +
13562 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13563 +       sg_table = &edesc->sgt[0];
13564 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13565 +
13566 +       edesc->src_nents = src_nents;
13567 +       edesc->dst_nents = dst_nents;
13568 +       edesc->dst = dst;
13569 +       edesc->iv_dma = iv_dma;
13570 +       edesc->drv_req.app_ctx = req;
13571 +       edesc->drv_req.cbk = tls_done;
13572 +       edesc->drv_req.drv_ctx = drv_ctx;
13573 +
13574 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13575 +       qm_sg_index = 1;
13576 +
13577 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13578 +       qm_sg_index += mapped_src_nents;
13579 +
13580 +       if (mapped_dst_nents > 1)
13581 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13582 +                                qm_sg_index, 0);
13583 +
13584 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13585 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
13586 +               dev_err(qidev, "unable to map S/G table\n");
13587 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13588 +                          ivsize, op_type, 0, 0);
13589 +               qi_cache_free(edesc);
13590 +               return ERR_PTR(-ENOMEM);
13591 +       }
13592 +
13593 +       edesc->qm_sg_dma = qm_sg_dma;
13594 +       edesc->qm_sg_bytes = qm_sg_bytes;
13595 +
13596 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
13597 +       in_len = ivsize + req->assoclen + req->cryptlen;
13598 +
13599 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13600 +
13601 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13602 +
13603 +       if (req->dst == req->src)
13604 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13605 +                                   (sg_nents_for_len(req->src, req->assoclen) +
13606 +                                    1) * sizeof(*sg_table), out_len, 0);
13607 +       else if (mapped_dst_nents == 1)
13608 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13609 +       else
13610 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13611 +                                    qm_sg_index, out_len, 0);
13612 +
13613 +       return edesc;
13614 +}
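
The in-place branch above (req->dst == req->src) points the output FD into the same qman S/G table just built: entry 0 is the IV, followed by one entry per mapped src segment, so the output must skip the IV entry plus however many src entries cover req->assoclen bytes of associated data. A minimal sketch of that arithmetic, using a hypothetical helper (tls_inplace_out_addr() is illustrative, not driver code, and assumes the caller already validated the lengths):

    #include <linux/scatterlist.h>

    static dma_addr_t tls_inplace_out_addr(dma_addr_t qm_sg_dma,
                                           struct scatterlist *src,
                                           unsigned int assoclen,
                                           size_t entry_size)
    {
            /* 1 IV entry + however many src entries hold assoclen bytes */
            int skip = 1 + sg_nents_for_len(src, assoclen);

            return qm_sg_dma + skip * entry_size;
    }
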
13615 +
13616 +static int tls_crypt(struct aead_request *req, bool encrypt)
13617 +{
13618 +       struct tls_edesc *edesc;
13619 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
13620 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
13621 +       int ret;
13622 +
13623 +       if (unlikely(caam_congested))
13624 +               return -EAGAIN;
13625 +
13626 +       edesc = tls_edesc_alloc(req, encrypt);
13627 +       if (IS_ERR_OR_NULL(edesc))
13628 +               return edesc ? PTR_ERR(edesc) : -ENOMEM;
13629 +
13630 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13631 +       if (!ret) {
13632 +               ret = -EINPROGRESS;
13633 +       } else {
13634 +               tls_unmap(ctx->qidev, edesc, req);
13635 +               qi_cache_free(edesc);
13636 +       }
13637 +
13638 +       return ret;
13639 +}
13640 +
13641 +static int tls_encrypt(struct aead_request *req)
13642 +{
13643 +       return tls_crypt(req, true);
13644 +}
13645 +
13646 +static int tls_decrypt(struct aead_request *req)
13647 +{
13648 +       return tls_crypt(req, false);
13649 +}
13650 +
13651 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13652 +{
13653 +       struct ablkcipher_edesc *edesc;
13654 +       struct ablkcipher_request *req = drv_req->app_ctx;
13655 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13656 +       struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13657 +       struct device *qidev = caam_ctx->qidev;
13658 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13659 +
13660 +#ifdef DEBUG
13661 +       dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13662 +#endif
13663 +
13664 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
13665 +
13666 +       if (status)
13667 +               caam_jr_strstatus(qidev, status);
13668 +
13669 +#ifdef DEBUG
13670 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
13671 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13672 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
13673 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
13674 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13675 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13676 +#endif
13677 +
13678 +       ablkcipher_unmap(qidev, edesc, req);
13679 +       qi_cache_free(edesc);
13680 +
13681 +       /*
13682 +        * The crypto API expects us to set the IV (req->info) to the last
13683 +        * ciphertext block. This is used e.g. by the CTS mode.
13684 +        */
13685 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13686 +                                ivsize, 0);
13687 +
13688 +       ablkcipher_request_complete(req, status);
13689 +}
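
The IV copy above is what lets a user split one long CBC stream across several requests. For CBC the recurrence is

    C[0] = E(K, P[0] XOR IV)
    C[i] = E(K, P[i] XOR C[i-1]),  i >= 1

so resuming at block n needs only C[n-1], the last ciphertext block of the previous request, which is exactly what gets copied back into req->info. (The same convention is what CTS builds on, as the comment notes.)
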
13690 +
13691 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13692 +                                                      *req, bool encrypt)
13693 +{
13694 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13695 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13696 +       struct device *qidev = ctx->qidev;
13697 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13698 +                      GFP_KERNEL : GFP_ATOMIC;
13699 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13700 +       struct ablkcipher_edesc *edesc;
13701 +       dma_addr_t iv_dma;
13702 +       bool in_contig;
13703 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13704 +       int dst_sg_idx, qm_sg_ents;
13705 +       struct qm_sg_entry *sg_table, *fd_sgt;
13706 +       struct caam_drv_ctx *drv_ctx;
13707 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13708 +
13709 +       drv_ctx = get_drv_ctx(ctx, op_type);
13710 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13711 +               return (struct ablkcipher_edesc *)drv_ctx;
13712 +
13713 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13714 +       if (unlikely(src_nents < 0)) {
13715 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13716 +                       req->nbytes);
13717 +               return ERR_PTR(src_nents);
13718 +       }
13719 +
13720 +       if (unlikely(req->src != req->dst)) {
13721 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13722 +               if (unlikely(dst_nents < 0)) {
13723 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13724 +                               req->nbytes);
13725 +                       return ERR_PTR(dst_nents);
13726 +               }
13727 +
13728 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13729 +                                             DMA_TO_DEVICE);
13730 +               if (unlikely(!mapped_src_nents)) {
13731 +                       dev_err(qidev, "unable to map source\n");
13732 +                       return ERR_PTR(-ENOMEM);
13733 +               }
13734 +
13735 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13736 +                                             DMA_FROM_DEVICE);
13737 +               if (unlikely(!mapped_dst_nents)) {
13738 +                       dev_err(qidev, "unable to map destination\n");
13739 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13740 +                       return ERR_PTR(-ENOMEM);
13741 +               }
13742 +       } else {
13743 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13744 +                                             DMA_BIDIRECTIONAL);
13745 +               if (unlikely(!mapped_src_nents)) {
13746 +                       dev_err(qidev, "unable to map source\n");
13747 +                       return ERR_PTR(-ENOMEM);
13748 +               }
13749 +       }
13750 +
13751 +       iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13752 +       if (dma_mapping_error(qidev, iv_dma)) {
13753 +               dev_err(qidev, "unable to map IV\n");
13754 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13755 +                          0, 0, 0, 0);
13756 +               return ERR_PTR(-ENOMEM);
13757 +       }
13758 +
13759 +       if (mapped_src_nents == 1 &&
13760 +           iv_dma + ivsize == sg_dma_address(req->src)) {
13761 +               in_contig = true;
13762 +               qm_sg_ents = 0;
13763 +       } else {
13764 +               in_contig = false;
13765 +               qm_sg_ents = 1 + mapped_src_nents;
13766 +       }
13767 +       dst_sg_idx = qm_sg_ents;
13768 +
13769 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13770 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13771 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13772 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13773 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13774 +                          iv_dma, ivsize, op_type, 0, 0);
13775 +               return ERR_PTR(-ENOMEM);
13776 +       }
13777 +
13778 +       /* allocate space for base edesc and link tables */
13779 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13780 +       if (unlikely(!edesc)) {
13781 +               dev_err(qidev, "could not allocate extended descriptor\n");
13782 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13783 +                          iv_dma, ivsize, op_type, 0, 0);
13784 +               return ERR_PTR(-ENOMEM);
13785 +       }
13786 +
13787 +       edesc->src_nents = src_nents;
13788 +       edesc->dst_nents = dst_nents;
13789 +       edesc->iv_dma = iv_dma;
13790 +       sg_table = &edesc->sgt[0];
13791 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13792 +       edesc->drv_req.app_ctx = req;
13793 +       edesc->drv_req.cbk = ablkcipher_done;
13794 +       edesc->drv_req.drv_ctx = drv_ctx;
13795 +
13796 +       if (!in_contig) {
13797 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13798 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13799 +       }
13800 +
13801 +       if (mapped_dst_nents > 1)
13802 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13803 +                                dst_sg_idx, 0);
13804 +
13805 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13806 +                                         DMA_TO_DEVICE);
13807 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13808 +               dev_err(qidev, "unable to map S/G table\n");
13809 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13810 +                          iv_dma, ivsize, op_type, 0, 0);
13811 +               qi_cache_free(edesc);
13812 +               return ERR_PTR(-ENOMEM);
13813 +       }
13814 +
13815 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13816 +
13817 +       if (!in_contig)
13818 +               dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13819 +                                         ivsize + req->nbytes, 0);
13820 +       else
13821 +               dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13822 +                                     0);
13823 +
13824 +       if (req->src == req->dst) {
13825 +               if (!in_contig)
13826 +                       dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13827 +                                            sizeof(*sg_table), req->nbytes, 0);
13828 +               else
13829 +                       dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13830 +                                        req->nbytes, 0);
13831 +       } else if (mapped_dst_nents > 1) {
13832 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13833 +                                    sizeof(*sg_table), req->nbytes, 0);
13834 +       } else {
13835 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13836 +                                req->nbytes, 0);
13837 +       }
13838 +
13839 +       return edesc;
13840 +}
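
The allocator above skips the input S/G table entirely when the IV mapping happens to land immediately before a single contiguous source segment (in_contig); otherwise it needs one IV entry plus one entry per mapped src segment, with any dst entries appended at dst_sg_idx. A sketch of that accounting (hypothetical helper, mirrors the logic above, not driver code):

    static int ablkcipher_qm_sg_ents(bool in_contig, int mapped_src_nents,
                                     int mapped_dst_nents)
    {
            int ents = in_contig ? 0 : 1 + mapped_src_nents;

            /* dst gets table entries only when it is truly scattered */
            if (mapped_dst_nents > 1)
                    ents += mapped_dst_nents;

            return ents;
    }
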
13841 +
13842 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13843 +       struct skcipher_givcrypt_request *creq)
13844 +{
13845 +       struct ablkcipher_request *req = &creq->creq;
13846 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13847 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13848 +       struct device *qidev = ctx->qidev;
13849 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13850 +                      GFP_KERNEL : GFP_ATOMIC;
13851 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13852 +       struct ablkcipher_edesc *edesc;
13853 +       dma_addr_t iv_dma;
13854 +       bool out_contig;
13855 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13856 +       struct qm_sg_entry *sg_table, *fd_sgt;
13857 +       int dst_sg_idx, qm_sg_ents;
13858 +       struct caam_drv_ctx *drv_ctx;
13859 +
13860 +       drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13861 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13862 +               return (struct ablkcipher_edesc *)drv_ctx;
13863 +
13864 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
13865 +       if (unlikely(src_nents < 0)) {
13866 +               dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13867 +                       req->nbytes);
13868 +               return ERR_PTR(src_nents);
13869 +       }
13870 +
13871 +       if (unlikely(req->src != req->dst)) {
13872 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13873 +               if (unlikely(dst_nents < 0)) {
13874 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13875 +                               req->nbytes);
13876 +                       return ERR_PTR(dst_nents);
13877 +               }
13878 +
13879 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13880 +                                             DMA_TO_DEVICE);
13881 +               if (unlikely(!mapped_src_nents)) {
13882 +                       dev_err(qidev, "unable to map source\n");
13883 +                       return ERR_PTR(-ENOMEM);
13884 +               }
13885 +
13886 +               mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13887 +                                             DMA_FROM_DEVICE);
13888 +               if (unlikely(!mapped_dst_nents)) {
13889 +                       dev_err(qidev, "unable to map destination\n");
13890 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13891 +                       return ERR_PTR(-ENOMEM);
13892 +               }
13893 +       } else {
13894 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13895 +                                             DMA_BIDIRECTIONAL);
13896 +               if (unlikely(!mapped_src_nents)) {
13897 +                       dev_err(qidev, "unable to map source\n");
13898 +                       return ERR_PTR(-ENOMEM);
13899 +               }
13900 +
13901 +               dst_nents = src_nents;
13902 +               mapped_dst_nents = src_nents;
13903 +       }
13904 +
13905 +       iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13906 +       if (dma_mapping_error(qidev, iv_dma)) {
13907 +               dev_err(qidev, "unable to map IV\n");
13908 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13909 +                          0, 0, 0, 0);
13910 +               return ERR_PTR(-ENOMEM);
13911 +       }
13912 +
13913 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13914 +       dst_sg_idx = qm_sg_ents;
13915 +       if (mapped_dst_nents == 1 &&
13916 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
13917 +               out_contig = true;
13918 +       } else {
13919 +               out_contig = false;
13920 +               qm_sg_ents += 1 + mapped_dst_nents;
13921 +       }
13922 +
13923 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13924 +               dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13925 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13926 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13927 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13928 +               return ERR_PTR(-ENOMEM);
13929 +       }
13930 +
13931 +       /* allocate space for base edesc and link tables */
13932 +       edesc = qi_cache_alloc(GFP_DMA | flags);
13933 +       if (unlikely(!edesc)) {
13934 +               dev_err(qidev, "could not allocate extended descriptor\n");
13935 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13936 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13937 +               return ERR_PTR(-ENOMEM);
13938 +       }
13939 +
13940 +       edesc->src_nents = src_nents;
13941 +       edesc->dst_nents = dst_nents;
13942 +       edesc->iv_dma = iv_dma;
13943 +       sg_table = &edesc->sgt[0];
13944 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13945 +       edesc->drv_req.app_ctx = req;
13946 +       edesc->drv_req.cbk = ablkcipher_done;
13947 +       edesc->drv_req.drv_ctx = drv_ctx;
13948 +
13949 +       if (mapped_src_nents > 1)
13950 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13951 +
13952 +       if (!out_contig) {
13953 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13954 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13955 +                                dst_sg_idx + 1, 0);
13956 +       }
13957 +
13958 +       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13959 +                                         DMA_TO_DEVICE);
13960 +       if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13961 +               dev_err(qidev, "unable to map S/G table\n");
13962 +               caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13963 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
13964 +               qi_cache_free(edesc);
13965 +               return ERR_PTR(-ENOMEM);
13966 +       }
13967 +
13968 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
13969 +
13970 +       if (mapped_src_nents > 1)
13971 +               dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13972 +                                    0);
13973 +       else
13974 +               dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13975 +                                req->nbytes, 0);
13976 +
13977 +       if (!out_contig)
13978 +               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13979 +                                    sizeof(*sg_table), ivsize + req->nbytes,
13980 +                                    0);
13981 +       else
13982 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13983 +                                ivsize + req->nbytes, 0);
13984 +
13985 +       return edesc;
13986 +}
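
In this giv variant the IV buffer is an output, not an input: the SEC generates the IV and writes it to creq->giv, which is why it is mapped DMA_FROM_DEVICE here while the non-giv path maps req->info DMA_TO_DEVICE. The output FD is sized ivsize + req->nbytes so the generated IV and the ciphertext come back together:

    non-giv path:  req->info  mapped DMA_TO_DEVICE    (IV consumed)
    giv path:      creq->giv  mapped DMA_FROM_DEVICE  (IV produced)
    out FD layout: [ generated IV (ivsize) | ciphertext (req->nbytes) ]
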
13987 +
13988 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13989 +{
13990 +       struct ablkcipher_edesc *edesc;
13991 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13992 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13993 +       int ret;
13994 +
13995 +       if (unlikely(caam_congested))
13996 +               return -EAGAIN;
13997 +
13998 +       /* allocate extended descriptor */
13999 +       edesc = ablkcipher_edesc_alloc(req, encrypt);
14000 +       if (IS_ERR_OR_NULL(edesc))
14001 +               return edesc ? PTR_ERR(edesc) : -ENOMEM;
14002 +
14003 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14004 +       if (!ret) {
14005 +               ret = -EINPROGRESS;
14006 +       } else {
14007 +               ablkcipher_unmap(ctx->qidev, edesc, req);
14008 +               qi_cache_free(edesc);
14009 +       }
14010 +
14011 +       return ret;
14012 +}
14013 +
14014 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
14015 +{
14016 +       return ablkcipher_crypt(req, true);
14017 +}
14018 +
14019 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
14020 +{
14021 +       return ablkcipher_crypt(req, false);
14022 +}
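
Both wrappers return -EINPROGRESS once the request is enqueued to QI, and -EAGAIN when the interface is congested, so a caller must wait for the completion callback (or retry). A hedged, self-contained sketch of a synchronous caller using the 4.9-era ablkcipher API; the demo_* names are illustrative only:

    #include <linux/completion.h>
    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    struct demo_result {
            struct completion completion;
            int err;
    };

    static void demo_done(struct crypto_async_request *areq, int err)
    {
            struct demo_result *res = areq->data;

            if (err == -EINPROGRESS)        /* backlog notification only */
                    return;
            res->err = err;
            complete(&res->completion);
    }

    static int demo_cbc_aes(struct scatterlist *src, struct scatterlist *dst,
                            unsigned int nbytes, u8 *iv,
                            const u8 *key, unsigned int keylen)
    {
            struct crypto_ablkcipher *tfm;
            struct ablkcipher_request *req;
            struct demo_result res;
            int ret;

            tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_ablkcipher_setkey(tfm, key, keylen);
            if (ret)
                    goto out_tfm;

            req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_tfm;
            }

            init_completion(&res.completion);
            ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                            demo_done, &res);
            ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

            /* this driver returns -EAGAIN under congestion; callers may retry */
            ret = crypto_ablkcipher_encrypt(req);
            if (ret == -EINPROGRESS || ret == -EBUSY) {
                    wait_for_completion(&res.completion);
                    ret = res.err;
            }

            ablkcipher_request_free(req);
    out_tfm:
            crypto_free_ablkcipher(tfm);
            return ret;
    }
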
14023 +
14024 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
14025 +{
14026 +       struct ablkcipher_request *req = &creq->creq;
14027 +       struct ablkcipher_edesc *edesc;
14028 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14029 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14030 +       int ret;
14031 +
14032 +       if (unlikely(caam_congested))
14033 +               return -EAGAIN;
14034 +
14035 +       /* allocate extended descriptor */
14036 +       edesc = ablkcipher_giv_edesc_alloc(creq);
14037 +       if (IS_ERR_OR_NULL(edesc))
14038 +               return edesc ? PTR_ERR(edesc) : -ENOMEM;
14039 +
14040 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14041 +       if (!ret) {
14042 +               ret = -EINPROGRESS;
14043 +       } else {
14044 +               ablkcipher_unmap(ctx->qidev, edesc, req);
14045 +               qi_cache_free(edesc);
14046 +       }
14047 +
14048 +       return ret;
14049 +}
14050 +
14051 +#define template_ablkcipher    template_u.ablkcipher
14052 +struct caam_alg_template {
14053 +       char name[CRYPTO_MAX_ALG_NAME];
14054 +       char driver_name[CRYPTO_MAX_ALG_NAME];
14055 +       unsigned int blocksize;
14056 +       u32 type;
14057 +       union {
14058 +               struct ablkcipher_alg ablkcipher;
14059 +       } template_u;
14060 +       u32 class1_alg_type;
14061 +       u32 class2_alg_type;
14062 +};
14063 +
14064 +static struct caam_alg_template driver_algs[] = {
14065 +       /* ablkcipher descriptor */
14066 +       {
14067 +               .name = "cbc(aes)",
14068 +               .driver_name = "cbc-aes-caam-qi",
14069 +               .blocksize = AES_BLOCK_SIZE,
14070 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14071 +               .template_ablkcipher = {
14072 +                       .setkey = ablkcipher_setkey,
14073 +                       .encrypt = ablkcipher_encrypt,
14074 +                       .decrypt = ablkcipher_decrypt,
14075 +                       .givencrypt = ablkcipher_givencrypt,
14076 +                       .geniv = "<built-in>",
14077 +                       .min_keysize = AES_MIN_KEY_SIZE,
14078 +                       .max_keysize = AES_MAX_KEY_SIZE,
14079 +                       .ivsize = AES_BLOCK_SIZE,
14080 +               },
14081 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14082 +       },
14083 +       {
14084 +               .name = "cbc(des3_ede)",
14085 +               .driver_name = "cbc-3des-caam-qi",
14086 +               .blocksize = DES3_EDE_BLOCK_SIZE,
14087 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14088 +               .template_ablkcipher = {
14089 +                       .setkey = ablkcipher_setkey,
14090 +                       .encrypt = ablkcipher_encrypt,
14091 +                       .decrypt = ablkcipher_decrypt,
14092 +                       .givencrypt = ablkcipher_givencrypt,
14093 +                       .geniv = "<built-in>",
14094 +                       .min_keysize = DES3_EDE_KEY_SIZE,
14095 +                       .max_keysize = DES3_EDE_KEY_SIZE,
14096 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14097 +               },
14098 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14099 +       },
14100 +       {
14101 +               .name = "cbc(des)",
14102 +               .driver_name = "cbc-des-caam-qi",
14103 +               .blocksize = DES_BLOCK_SIZE,
14104 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14105 +               .template_ablkcipher = {
14106 +                       .setkey = ablkcipher_setkey,
14107 +                       .encrypt = ablkcipher_encrypt,
14108 +                       .decrypt = ablkcipher_decrypt,
14109 +                       .givencrypt = ablkcipher_givencrypt,
14110 +                       .geniv = "<built-in>",
14111 +                       .min_keysize = DES_KEY_SIZE,
14112 +                       .max_keysize = DES_KEY_SIZE,
14113 +                       .ivsize = DES_BLOCK_SIZE,
14114 +               },
14115 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14116 +       },
14117 +       {
14118 +               .name = "ctr(aes)",
14119 +               .driver_name = "ctr-aes-caam-qi",
14120 +               .blocksize = 1,
14121 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14122 +               .template_ablkcipher = {
14123 +                       .setkey = ablkcipher_setkey,
14124 +                       .encrypt = ablkcipher_encrypt,
14125 +                       .decrypt = ablkcipher_decrypt,
14126 +                       .geniv = "chainiv",
14127 +                       .min_keysize = AES_MIN_KEY_SIZE,
14128 +                       .max_keysize = AES_MAX_KEY_SIZE,
14129 +                       .ivsize = AES_BLOCK_SIZE,
14130 +               },
14131 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14132 +       },
14133 +       {
14134 +               .name = "rfc3686(ctr(aes))",
14135 +               .driver_name = "rfc3686-ctr-aes-caam-qi",
14136 +               .blocksize = 1,
14137 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14138 +               .template_ablkcipher = {
14139 +                       .setkey = ablkcipher_setkey,
14140 +                       .encrypt = ablkcipher_encrypt,
14141 +                       .decrypt = ablkcipher_decrypt,
14142 +                       .givencrypt = ablkcipher_givencrypt,
14143 +                       .geniv = "<built-in>",
14144 +                       .min_keysize = AES_MIN_KEY_SIZE +
14145 +                                      CTR_RFC3686_NONCE_SIZE,
14146 +                       .max_keysize = AES_MAX_KEY_SIZE +
14147 +                                      CTR_RFC3686_NONCE_SIZE,
14148 +                       .ivsize = CTR_RFC3686_IV_SIZE,
14149 +               },
14150 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14151 +       },
14152 +       {
14153 +               .name = "xts(aes)",
14154 +               .driver_name = "xts-aes-caam-qi",
14155 +               .blocksize = AES_BLOCK_SIZE,
14156 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14157 +               .template_ablkcipher = {
14158 +                       .setkey = xts_ablkcipher_setkey,
14159 +                       .encrypt = ablkcipher_encrypt,
14160 +                       .decrypt = ablkcipher_decrypt,
14161 +                       .geniv = "eseqiv",
14162 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
14163 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
14164 +                       .ivsize = AES_BLOCK_SIZE,
14165 +               },
14166 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14167 +       },
14168 +};
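
These templates are materialized into full crypto_alg instances and registered by driver init code that appears later in this patch. A hedged sketch of that shape, where demo_alg_from_template() stands in for the real caam_alg_alloc()-style constructor and error unwinding is trimmed:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    /* illustrative only: builds a crypto_alg from one template entry */
    static struct crypto_alg *demo_alg_from_template(struct caam_alg_template *t);

    static int demo_register_templates(void)
    {
            int i, err;

            for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                    struct crypto_alg *alg =
                            demo_alg_from_template(&driver_algs[i]);

                    if (IS_ERR(alg))
                            return PTR_ERR(alg);

                    err = crypto_register_alg(alg);
                    if (err)
                            return err;
            }
            return 0;
    }
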
14169 +
14170 +static struct caam_aead_alg driver_aeads[] = {
14171 +       /* single-pass ipsec_esp descriptor */
14172 +       {
14173 +               .aead = {
14174 +                       .base = {
14175 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
14176 +                               .cra_driver_name = "authenc-hmac-md5-"
14177 +                                                  "cbc-aes-caam-qi",
14178 +                               .cra_blocksize = AES_BLOCK_SIZE,
14179 +                       },
14180 +                       .setkey = aead_setkey,
14181 +                       .setauthsize = aead_setauthsize,
14182 +                       .encrypt = aead_encrypt,
14183 +                       .decrypt = aead_decrypt,
14184 +                       .ivsize = AES_BLOCK_SIZE,
14185 +                       .maxauthsize = MD5_DIGEST_SIZE,
14186 +               },
14187 +               .caam = {
14188 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14189 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14190 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14191 +               }
14192 +       },
14193 +       {
14194 +               .aead = {
14195 +                       .base = {
14196 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14197 +                                           "cbc(aes)))",
14198 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14199 +                                                  "cbc-aes-caam-qi",
14200 +                               .cra_blocksize = AES_BLOCK_SIZE,
14201 +                       },
14202 +                       .setkey = aead_setkey,
14203 +                       .setauthsize = aead_setauthsize,
14204 +                       .encrypt = aead_encrypt,
14205 +                       .decrypt = aead_decrypt,
14206 +                       .ivsize = AES_BLOCK_SIZE,
14207 +                       .maxauthsize = MD5_DIGEST_SIZE,
14208 +               },
14209 +               .caam = {
14210 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14211 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14212 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14213 +                       .geniv = true,
14214 +               }
14215 +       },
14216 +       {
14217 +               .aead = {
14218 +                       .base = {
14219 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
14220 +                               .cra_driver_name = "authenc-hmac-sha1-"
14221 +                                                  "cbc-aes-caam-qi",
14222 +                               .cra_blocksize = AES_BLOCK_SIZE,
14223 +                       },
14224 +                       .setkey = aead_setkey,
14225 +                       .setauthsize = aead_setauthsize,
14226 +                       .encrypt = aead_encrypt,
14227 +                       .decrypt = aead_decrypt,
14228 +                       .ivsize = AES_BLOCK_SIZE,
14229 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14230 +               },
14231 +               .caam = {
14232 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14233 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14234 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14235 +               }
14236 +       },
14237 +       {
14238 +               .aead = {
14239 +                       .base = {
14240 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14241 +                                           "cbc(aes)))",
14242 +                               .cra_driver_name = "echainiv-authenc-"
14243 +                                                  "hmac-sha1-cbc-aes-caam-qi",
14244 +                               .cra_blocksize = AES_BLOCK_SIZE,
14245 +                       },
14246 +                       .setkey = aead_setkey,
14247 +                       .setauthsize = aead_setauthsize,
14248 +                       .encrypt = aead_encrypt,
14249 +                       .decrypt = aead_decrypt,
14250 +                       .ivsize = AES_BLOCK_SIZE,
14251 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14252 +               },
14253 +               .caam = {
14254 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14255 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14256 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14257 +                       .geniv = true,
14258 +               },
14259 +       },
14260 +       {
14261 +               .aead = {
14262 +                       .base = {
14263 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
14264 +                               .cra_driver_name = "authenc-hmac-sha224-"
14265 +                                                  "cbc-aes-caam-qi",
14266 +                               .cra_blocksize = AES_BLOCK_SIZE,
14267 +                       },
14268 +                       .setkey = aead_setkey,
14269 +                       .setauthsize = aead_setauthsize,
14270 +                       .encrypt = aead_encrypt,
14271 +                       .decrypt = aead_decrypt,
14272 +                       .ivsize = AES_BLOCK_SIZE,
14273 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14274 +               },
14275 +               .caam = {
14276 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14277 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14278 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14279 +               }
14280 +       },
14281 +       {
14282 +               .aead = {
14283 +                       .base = {
14284 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14285 +                                           "cbc(aes)))",
14286 +                               .cra_driver_name = "echainiv-authenc-"
14287 +                                                  "hmac-sha224-cbc-aes-caam-qi",
14288 +                               .cra_blocksize = AES_BLOCK_SIZE,
14289 +                       },
14290 +                       .setkey = aead_setkey,
14291 +                       .setauthsize = aead_setauthsize,
14292 +                       .encrypt = aead_encrypt,
14293 +                       .decrypt = aead_decrypt,
14294 +                       .ivsize = AES_BLOCK_SIZE,
14295 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14296 +               },
14297 +               .caam = {
14298 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14299 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14300 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14301 +                       .geniv = true,
14302 +               }
14303 +       },
14304 +       {
14305 +               .aead = {
14306 +                       .base = {
14307 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
14308 +                               .cra_driver_name = "authenc-hmac-sha256-"
14309 +                                                  "cbc-aes-caam-qi",
14310 +                               .cra_blocksize = AES_BLOCK_SIZE,
14311 +                       },
14312 +                       .setkey = aead_setkey,
14313 +                       .setauthsize = aead_setauthsize,
14314 +                       .encrypt = aead_encrypt,
14315 +                       .decrypt = aead_decrypt,
14316 +                       .ivsize = AES_BLOCK_SIZE,
14317 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14318 +               },
14319 +               .caam = {
14320 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14321 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14322 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14323 +               }
14324 +       },
14325 +       {
14326 +               .aead = {
14327 +                       .base = {
14328 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14329 +                                           "cbc(aes)))",
14330 +                               .cra_driver_name = "echainiv-authenc-"
14331 +                                                  "hmac-sha256-cbc-aes-"
14332 +                                                  "caam-qi",
14333 +                               .cra_blocksize = AES_BLOCK_SIZE,
14334 +                       },
14335 +                       .setkey = aead_setkey,
14336 +                       .setauthsize = aead_setauthsize,
14337 +                       .encrypt = aead_encrypt,
14338 +                       .decrypt = aead_decrypt,
14339 +                       .ivsize = AES_BLOCK_SIZE,
14340 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14341 +               },
14342 +               .caam = {
14343 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14344 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14345 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14346 +                       .geniv = true,
14347 +               }
14348 +       },
14349 +       {
14350 +               .aead = {
14351 +                       .base = {
14352 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
14353 +                               .cra_driver_name = "authenc-hmac-sha384-"
14354 +                                                  "cbc-aes-caam-qi",
14355 +                               .cra_blocksize = AES_BLOCK_SIZE,
14356 +                       },
14357 +                       .setkey = aead_setkey,
14358 +                       .setauthsize = aead_setauthsize,
14359 +                       .encrypt = aead_encrypt,
14360 +                       .decrypt = aead_decrypt,
14361 +                       .ivsize = AES_BLOCK_SIZE,
14362 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14363 +               },
14364 +               .caam = {
14365 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14366 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14367 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14368 +               }
14369 +       },
14370 +       {
14371 +               .aead = {
14372 +                       .base = {
14373 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14374 +                                           "cbc(aes)))",
14375 +                               .cra_driver_name = "echainiv-authenc-"
14376 +                                                  "hmac-sha384-cbc-aes-"
14377 +                                                  "caam-qi",
14378 +                               .cra_blocksize = AES_BLOCK_SIZE,
14379 +                       },
14380 +                       .setkey = aead_setkey,
14381 +                       .setauthsize = aead_setauthsize,
14382 +                       .encrypt = aead_encrypt,
14383 +                       .decrypt = aead_decrypt,
14384 +                       .ivsize = AES_BLOCK_SIZE,
14385 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14386 +               },
14387 +               .caam = {
14388 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14389 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14390 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14391 +                       .geniv = true,
14392 +               }
14393 +       },
14394 +       {
14395 +               .aead = {
14396 +                       .base = {
14397 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
14398 +                               .cra_driver_name = "authenc-hmac-sha512-"
14399 +                                                  "cbc-aes-caam-qi",
14400 +                               .cra_blocksize = AES_BLOCK_SIZE,
14401 +                       },
14402 +                       .setkey = aead_setkey,
14403 +                       .setauthsize = aead_setauthsize,
14404 +                       .encrypt = aead_encrypt,
14405 +                       .decrypt = aead_decrypt,
14406 +                       .ivsize = AES_BLOCK_SIZE,
14407 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14408 +               },
14409 +               .caam = {
14410 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14411 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14412 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14413 +               }
14414 +       },
14415 +       {
14416 +               .aead = {
14417 +                       .base = {
14418 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14419 +                                           "cbc(aes)))",
14420 +                               .cra_driver_name = "echainiv-authenc-"
14421 +                                                  "hmac-sha512-cbc-aes-"
14422 +                                                  "caam-qi",
14423 +                               .cra_blocksize = AES_BLOCK_SIZE,
14424 +                       },
14425 +                       .setkey = aead_setkey,
14426 +                       .setauthsize = aead_setauthsize,
14427 +                       .encrypt = aead_encrypt,
14428 +                       .decrypt = aead_decrypt,
14429 +                       .ivsize = AES_BLOCK_SIZE,
14430 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14431 +               },
14432 +               .caam = {
14433 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14434 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14435 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14436 +                       .geniv = true,
14437 +               }
14438 +       },
14439 +       {
14440 +               .aead = {
14441 +                       .base = {
14442 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14443 +                               .cra_driver_name = "authenc-hmac-md5-"
14444 +                                                  "cbc-des3_ede-caam-qi",
14445 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14446 +                       },
14447 +                       .setkey = aead_setkey,
14448 +                       .setauthsize = aead_setauthsize,
14449 +                       .encrypt = aead_encrypt,
14450 +                       .decrypt = aead_decrypt,
14451 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14452 +                       .maxauthsize = MD5_DIGEST_SIZE,
14453 +               },
14454 +               .caam = {
14455 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14456 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14457 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14458 +               }
14459 +       },
14460 +       {
14461 +               .aead = {
14462 +                       .base = {
14463 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14464 +                                           "cbc(des3_ede)))",
14465 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14466 +                                                  "cbc-des3_ede-caam-qi",
14467 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14468 +                       },
14469 +                       .setkey = aead_setkey,
14470 +                       .setauthsize = aead_setauthsize,
14471 +                       .encrypt = aead_encrypt,
14472 +                       .decrypt = aead_decrypt,
14473 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14474 +                       .maxauthsize = MD5_DIGEST_SIZE,
14475 +               },
14476 +               .caam = {
14477 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14478 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14479 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14480 +                       .geniv = true,
14481 +               }
14482 +       },
14483 +       {
14484 +               .aead = {
14485 +                       .base = {
14486 +                               .cra_name = "authenc(hmac(sha1),"
14487 +                                           "cbc(des3_ede))",
14488 +                               .cra_driver_name = "authenc-hmac-sha1-"
14489 +                                                  "cbc-des3_ede-caam-qi",
14490 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14491 +                       },
14492 +                       .setkey = aead_setkey,
14493 +                       .setauthsize = aead_setauthsize,
14494 +                       .encrypt = aead_encrypt,
14495 +                       .decrypt = aead_decrypt,
14496 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14497 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14498 +               },
14499 +               .caam = {
14500 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14501 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14502 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14503 +               },
14504 +       },
14505 +       {
14506 +               .aead = {
14507 +                       .base = {
14508 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14509 +                                           "cbc(des3_ede)))",
14510 +                               .cra_driver_name = "echainiv-authenc-"
14511 +                                                  "hmac-sha1-"
14512 +                                                  "cbc-des3_ede-caam-qi",
14513 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14514 +                       },
14515 +                       .setkey = aead_setkey,
14516 +                       .setauthsize = aead_setauthsize,
14517 +                       .encrypt = aead_encrypt,
14518 +                       .decrypt = aead_decrypt,
14519 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14520 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14521 +               },
14522 +               .caam = {
14523 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14524 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14525 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14526 +                       .geniv = true,
14527 +               }
14528 +       },
14529 +       {
14530 +               .aead = {
14531 +                       .base = {
14532 +                               .cra_name = "authenc(hmac(sha224),"
14533 +                                           "cbc(des3_ede))",
14534 +                               .cra_driver_name = "authenc-hmac-sha224-"
14535 +                                                  "cbc-des3_ede-caam-qi",
14536 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14537 +                       },
14538 +                       .setkey = aead_setkey,
14539 +                       .setauthsize = aead_setauthsize,
14540 +                       .encrypt = aead_encrypt,
14541 +                       .decrypt = aead_decrypt,
14542 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14543 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14544 +               },
14545 +               .caam = {
14546 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14547 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14548 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14549 +               },
14550 +       },
14551 +       {
14552 +               .aead = {
14553 +                       .base = {
14554 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14555 +                                           "cbc(des3_ede)))",
14556 +                               .cra_driver_name = "echainiv-authenc-"
14557 +                                                  "hmac-sha224-"
14558 +                                                  "cbc-des3_ede-caam-qi",
14559 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14560 +                       },
14561 +                       .setkey = aead_setkey,
14562 +                       .setauthsize = aead_setauthsize,
14563 +                       .encrypt = aead_encrypt,
14564 +                       .decrypt = aead_decrypt,
14565 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14566 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14567 +               },
14568 +               .caam = {
14569 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14570 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14571 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14572 +                       .geniv = true,
14573 +               }
14574 +       },
14575 +       {
14576 +               .aead = {
14577 +                       .base = {
14578 +                               .cra_name = "authenc(hmac(sha256),"
14579 +                                           "cbc(des3_ede))",
14580 +                               .cra_driver_name = "authenc-hmac-sha256-"
14581 +                                                  "cbc-des3_ede-caam-qi",
14582 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14583 +                       },
14584 +                       .setkey = aead_setkey,
14585 +                       .setauthsize = aead_setauthsize,
14586 +                       .encrypt = aead_encrypt,
14587 +                       .decrypt = aead_decrypt,
14588 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14589 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14590 +               },
14591 +               .caam = {
14592 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14593 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14594 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14595 +               },
14596 +       },
14597 +       {
14598 +               .aead = {
14599 +                       .base = {
14600 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14601 +                                           "cbc(des3_ede)))",
14602 +                               .cra_driver_name = "echainiv-authenc-"
14603 +                                                  "hmac-sha256-"
14604 +                                                  "cbc-des3_ede-caam-qi",
14605 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14606 +                       },
14607 +                       .setkey = aead_setkey,
14608 +                       .setauthsize = aead_setauthsize,
14609 +                       .encrypt = aead_encrypt,
14610 +                       .decrypt = aead_decrypt,
14611 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14612 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14613 +               },
14614 +               .caam = {
14615 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14616 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14617 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14618 +                       .geniv = true,
14619 +               }
14620 +       },
14621 +       {
14622 +               .aead = {
14623 +                       .base = {
14624 +                               .cra_name = "authenc(hmac(sha384),"
14625 +                                           "cbc(des3_ede))",
14626 +                               .cra_driver_name = "authenc-hmac-sha384-"
14627 +                                                  "cbc-des3_ede-caam-qi",
14628 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14629 +                       },
14630 +                       .setkey = aead_setkey,
14631 +                       .setauthsize = aead_setauthsize,
14632 +                       .encrypt = aead_encrypt,
14633 +                       .decrypt = aead_decrypt,
14634 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14635 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14636 +               },
14637 +               .caam = {
14638 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14639 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14640 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14641 +               },
14642 +       },
14643 +       {
14644 +               .aead = {
14645 +                       .base = {
14646 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14647 +                                           "cbc(des3_ede)))",
14648 +                               .cra_driver_name = "echainiv-authenc-"
14649 +                                                  "hmac-sha384-"
14650 +                                                  "cbc-des3_ede-caam-qi",
14651 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14652 +                       },
14653 +                       .setkey = aead_setkey,
14654 +                       .setauthsize = aead_setauthsize,
14655 +                       .encrypt = aead_encrypt,
14656 +                       .decrypt = aead_decrypt,
14657 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14658 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14659 +               },
14660 +               .caam = {
14661 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14662 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14663 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14664 +                       .geniv = true,
14665 +               }
14666 +       },
14667 +       {
14668 +               .aead = {
14669 +                       .base = {
14670 +                               .cra_name = "authenc(hmac(sha512),"
14671 +                                           "cbc(des3_ede))",
14672 +                               .cra_driver_name = "authenc-hmac-sha512-"
14673 +                                                  "cbc-des3_ede-caam-qi",
14674 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14675 +                       },
14676 +                       .setkey = aead_setkey,
14677 +                       .setauthsize = aead_setauthsize,
14678 +                       .encrypt = aead_encrypt,
14679 +                       .decrypt = aead_decrypt,
14680 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14681 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14682 +               },
14683 +               .caam = {
14684 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14685 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14686 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14687 +               },
14688 +       },
14689 +       {
14690 +               .aead = {
14691 +                       .base = {
14692 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14693 +                                           "cbc(des3_ede)))",
14694 +                               .cra_driver_name = "echainiv-authenc-"
14695 +                                                  "hmac-sha512-"
14696 +                                                  "cbc-des3_ede-caam-qi",
14697 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14698 +                       },
14699 +                       .setkey = aead_setkey,
14700 +                       .setauthsize = aead_setauthsize,
14701 +                       .encrypt = aead_encrypt,
14702 +                       .decrypt = aead_decrypt,
14703 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
14704 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14705 +               },
14706 +               .caam = {
14707 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14708 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14709 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14710 +                       .geniv = true,
14711 +               }
14712 +       },
14713 +       {
14714 +               .aead = {
14715 +                       .base = {
14716 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
14717 +                               .cra_driver_name = "authenc-hmac-md5-"
14718 +                                                  "cbc-des-caam-qi",
14719 +                               .cra_blocksize = DES_BLOCK_SIZE,
14720 +                       },
14721 +                       .setkey = aead_setkey,
14722 +                       .setauthsize = aead_setauthsize,
14723 +                       .encrypt = aead_encrypt,
14724 +                       .decrypt = aead_decrypt,
14725 +                       .ivsize = DES_BLOCK_SIZE,
14726 +                       .maxauthsize = MD5_DIGEST_SIZE,
14727 +               },
14728 +               .caam = {
14729 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14730 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14731 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14732 +               },
14733 +       },
14734 +       {
14735 +               .aead = {
14736 +                       .base = {
14737 +                               .cra_name = "echainiv(authenc(hmac(md5),"
14738 +                                           "cbc(des)))",
14739 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
14740 +                                                  "cbc-des-caam-qi",
14741 +                               .cra_blocksize = DES_BLOCK_SIZE,
14742 +                       },
14743 +                       .setkey = aead_setkey,
14744 +                       .setauthsize = aead_setauthsize,
14745 +                       .encrypt = aead_encrypt,
14746 +                       .decrypt = aead_decrypt,
14747 +                       .ivsize = DES_BLOCK_SIZE,
14748 +                       .maxauthsize = MD5_DIGEST_SIZE,
14749 +               },
14750 +               .caam = {
14751 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14752 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14753 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14754 +                       .geniv = true,
14755 +               }
14756 +       },
14757 +       {
14758 +               .aead = {
14759 +                       .base = {
14760 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
14761 +                               .cra_driver_name = "authenc-hmac-sha1-"
14762 +                                                  "cbc-des-caam-qi",
14763 +                               .cra_blocksize = DES_BLOCK_SIZE,
14764 +                       },
14765 +                       .setkey = aead_setkey,
14766 +                       .setauthsize = aead_setauthsize,
14767 +                       .encrypt = aead_encrypt,
14768 +                       .decrypt = aead_decrypt,
14769 +                       .ivsize = DES_BLOCK_SIZE,
14770 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14771 +               },
14772 +               .caam = {
14773 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14774 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14775 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14776 +               },
14777 +       },
14778 +       {
14779 +               .aead = {
14780 +                       .base = {
14781 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
14782 +                                           "cbc(des)))",
14783 +                               .cra_driver_name = "echainiv-authenc-"
14784 +                                                  "hmac-sha1-cbc-des-caam-qi",
14785 +                               .cra_blocksize = DES_BLOCK_SIZE,
14786 +                       },
14787 +                       .setkey = aead_setkey,
14788 +                       .setauthsize = aead_setauthsize,
14789 +                       .encrypt = aead_encrypt,
14790 +                       .decrypt = aead_decrypt,
14791 +                       .ivsize = DES_BLOCK_SIZE,
14792 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14793 +               },
14794 +               .caam = {
14795 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14796 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14797 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14798 +                       .geniv = true,
14799 +               }
14800 +       },
14801 +       {
14802 +               .aead = {
14803 +                       .base = {
14804 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
14805 +                               .cra_driver_name = "authenc-hmac-sha224-"
14806 +                                                  "cbc-des-caam-qi",
14807 +                               .cra_blocksize = DES_BLOCK_SIZE,
14808 +                       },
14809 +                       .setkey = aead_setkey,
14810 +                       .setauthsize = aead_setauthsize,
14811 +                       .encrypt = aead_encrypt,
14812 +                       .decrypt = aead_decrypt,
14813 +                       .ivsize = DES_BLOCK_SIZE,
14814 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14815 +               },
14816 +               .caam = {
14817 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14818 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14819 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14820 +               },
14821 +       },
14822 +       {
14823 +               .aead = {
14824 +                       .base = {
14825 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
14826 +                                           "cbc(des)))",
14827 +                               .cra_driver_name = "echainiv-authenc-"
14828 +                                                  "hmac-sha224-cbc-des-"
14829 +                                                  "caam-qi",
14830 +                               .cra_blocksize = DES_BLOCK_SIZE,
14831 +                       },
14832 +                       .setkey = aead_setkey,
14833 +                       .setauthsize = aead_setauthsize,
14834 +                       .encrypt = aead_encrypt,
14835 +                       .decrypt = aead_decrypt,
14836 +                       .ivsize = DES_BLOCK_SIZE,
14837 +                       .maxauthsize = SHA224_DIGEST_SIZE,
14838 +               },
14839 +               .caam = {
14840 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14841 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14842 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14843 +                       .geniv = true,
14844 +               }
14845 +       },
14846 +       {
14847 +               .aead = {
14848 +                       .base = {
14849 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
14850 +                               .cra_driver_name = "authenc-hmac-sha256-"
14851 +                                                  "cbc-des-caam-qi",
14852 +                               .cra_blocksize = DES_BLOCK_SIZE,
14853 +                       },
14854 +                       .setkey = aead_setkey,
14855 +                       .setauthsize = aead_setauthsize,
14856 +                       .encrypt = aead_encrypt,
14857 +                       .decrypt = aead_decrypt,
14858 +                       .ivsize = DES_BLOCK_SIZE,
14859 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14860 +               },
14861 +               .caam = {
14862 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14863 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14864 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14865 +               },
14866 +       },
14867 +       {
14868 +               .aead = {
14869 +                       .base = {
14870 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
14871 +                                           "cbc(des)))",
14872 +                               .cra_driver_name = "echainiv-authenc-"
14873 +                                                  "hmac-sha256-cbc-des-"
14874 +                                                  "caam-qi",
14875 +                               .cra_blocksize = DES_BLOCK_SIZE,
14876 +                       },
14877 +                       .setkey = aead_setkey,
14878 +                       .setauthsize = aead_setauthsize,
14879 +                       .encrypt = aead_encrypt,
14880 +                       .decrypt = aead_decrypt,
14881 +                       .ivsize = DES_BLOCK_SIZE,
14882 +                       .maxauthsize = SHA256_DIGEST_SIZE,
14883 +               },
14884 +               .caam = {
14885 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14886 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14887 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14888 +                       .geniv = true,
14889 +               },
14890 +       },
14891 +       {
14892 +               .aead = {
14893 +                       .base = {
14894 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
14895 +                               .cra_driver_name = "authenc-hmac-sha384-"
14896 +                                                  "cbc-des-caam-qi",
14897 +                               .cra_blocksize = DES_BLOCK_SIZE,
14898 +                       },
14899 +                       .setkey = aead_setkey,
14900 +                       .setauthsize = aead_setauthsize,
14901 +                       .encrypt = aead_encrypt,
14902 +                       .decrypt = aead_decrypt,
14903 +                       .ivsize = DES_BLOCK_SIZE,
14904 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14905 +               },
14906 +               .caam = {
14907 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14908 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14909 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14910 +               },
14911 +       },
14912 +       {
14913 +               .aead = {
14914 +                       .base = {
14915 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
14916 +                                           "cbc(des)))",
14917 +                               .cra_driver_name = "echainiv-authenc-"
14918 +                                                  "hmac-sha384-cbc-des-"
14919 +                                                  "caam-qi",
14920 +                               .cra_blocksize = DES_BLOCK_SIZE,
14921 +                       },
14922 +                       .setkey = aead_setkey,
14923 +                       .setauthsize = aead_setauthsize,
14924 +                       .encrypt = aead_encrypt,
14925 +                       .decrypt = aead_decrypt,
14926 +                       .ivsize = DES_BLOCK_SIZE,
14927 +                       .maxauthsize = SHA384_DIGEST_SIZE,
14928 +               },
14929 +               .caam = {
14930 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14931 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14932 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14933 +                       .geniv = true,
14934 +               }
14935 +       },
14936 +       {
14937 +               .aead = {
14938 +                       .base = {
14939 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
14940 +                               .cra_driver_name = "authenc-hmac-sha512-"
14941 +                                                  "cbc-des-caam-qi",
14942 +                               .cra_blocksize = DES_BLOCK_SIZE,
14943 +                       },
14944 +                       .setkey = aead_setkey,
14945 +                       .setauthsize = aead_setauthsize,
14946 +                       .encrypt = aead_encrypt,
14947 +                       .decrypt = aead_decrypt,
14948 +                       .ivsize = DES_BLOCK_SIZE,
14949 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14950 +               },
14951 +               .caam = {
14952 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14953 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14954 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14955 +               }
14956 +       },
14957 +       {
14958 +               .aead = {
14959 +                       .base = {
14960 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
14961 +                                           "cbc(des)))",
14962 +                               .cra_driver_name = "echainiv-authenc-"
14963 +                                                  "hmac-sha512-cbc-des-"
14964 +                                                  "caam-qi",
14965 +                               .cra_blocksize = DES_BLOCK_SIZE,
14966 +                       },
14967 +                       .setkey = aead_setkey,
14968 +                       .setauthsize = aead_setauthsize,
14969 +                       .encrypt = aead_encrypt,
14970 +                       .decrypt = aead_decrypt,
14971 +                       .ivsize = DES_BLOCK_SIZE,
14972 +                       .maxauthsize = SHA512_DIGEST_SIZE,
14973 +               },
14974 +               .caam = {
14975 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14976 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14977 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14978 +                       .geniv = true,
14979 +               }
14980 +       },
14981 +       {
14982 +               .aead = {
14983 +                       .base = {
14984 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
14985 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14986 +                               .cra_blocksize = AES_BLOCK_SIZE,
14987 +                       },
14988 +                       .setkey = tls_setkey,
14989 +                       .setauthsize = tls_setauthsize,
14990 +                       .encrypt = tls_encrypt,
14991 +                       .decrypt = tls_decrypt,
14992 +                       .ivsize = AES_BLOCK_SIZE,
14993 +                       .maxauthsize = SHA1_DIGEST_SIZE,
14994 +               },
14995 +               .caam = {
14996 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14997 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14998 +                                          OP_ALG_AAI_HMAC_PRECOMP,
14999 +               }
15000 +       }
15001 +};
15002 +
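+/*
+ * A minimal usage sketch (error handling trimmed; key/keylen are assumed to
+ * be provided by the caller) of how a kernel client reaches one of the AEADs
+ * registered above through the generic crypto API:
+ *
+ *	struct crypto_aead *tfm;
+ *
+ *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	crypto_aead_setkey(tfm, key, keylen);
+ *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
+ *
+ * Once registered, the "-caam-qi" cra_driver_name wins selection for that
+ * cra_name via CAAM_CRA_PRIORITY.
+ */
+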
15003 +struct caam_crypto_alg {
15004 +       struct list_head entry;
15005 +       struct crypto_alg crypto_alg;
15006 +       struct caam_alg_entry caam;
15007 +};
15008 +
15009 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
15010 +{
15011 +       struct caam_drv_private *priv;
15012 +       /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
15013 +       static const u8 digest_size[] = {
15014 +               MD5_DIGEST_SIZE,
15015 +               SHA1_DIGEST_SIZE,
15016 +               SHA224_DIGEST_SIZE,
15017 +               SHA256_DIGEST_SIZE,
15018 +               SHA384_DIGEST_SIZE,
15019 +               SHA512_DIGEST_SIZE
15020 +       };
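+       /*
+        * The table is indexed by the OP_ALG_ALGSEL sub-field, which (per the
+        * CAAM ALGSEL encoding this lookup relies on) numbers MD5..SHA512
+        * consecutively from 0; e.g. hmac(sha256) yields op_id 3.
+        */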
15021 +       u8 op_id;
15022 +
15023 +       /*
15024 +        * distribute tfms across job rings to ensure in-order
15025 +        * crypto request processing per tfm
15026 +        */
15027 +       ctx->jrdev = caam_jr_alloc();
15028 +       if (IS_ERR(ctx->jrdev)) {
15029 +               pr_err("Job Ring Device allocation for transform failed\n");
15030 +               return PTR_ERR(ctx->jrdev);
15031 +       }
15032 +
15033 +       ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
15034 +                                     DMA_TO_DEVICE);
15035 +       if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
15036 +               dev_err(ctx->jrdev, "unable to map key\n");
15037 +               caam_jr_free(ctx->jrdev);
15038 +               return -ENOMEM;
15039 +       }
15040 +
15041 +       /* copy descriptor header template value */
15042 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
15043 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
15044 +
15045 +       if (ctx->adata.algtype) {
15046 +               op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
15047 +                               >> OP_ALG_ALGSEL_SHIFT;
15048 +               if (op_id < ARRAY_SIZE(digest_size)) {
15049 +                       ctx->authsize = digest_size[op_id];
15050 +               } else {
15051 +                       dev_err(ctx->jrdev,
15052 +                               "incorrect op_id %d; must be less than %zu\n",
15053 +                               op_id, ARRAY_SIZE(digest_size));
15054 +                       caam_jr_free(ctx->jrdev);
15055 +                       return -EINVAL;
15056 +               }
15057 +       } else {
15058 +               ctx->authsize = 0;
15059 +       }
15060 +
15061 +       priv = dev_get_drvdata(ctx->jrdev->parent);
15062 +       ctx->qidev = priv->qidev;
15063 +
15064 +       spin_lock_init(&ctx->lock);
15065 +       ctx->drv_ctx[ENCRYPT] = NULL;
15066 +       ctx->drv_ctx[DECRYPT] = NULL;
15067 +       ctx->drv_ctx[GIVENCRYPT] = NULL;
15068 +
15069 +       return 0;
15070 +}
15071 +
15072 +static int caam_cra_init(struct crypto_tfm *tfm)
15073 +{
15074 +       struct crypto_alg *alg = tfm->__crt_alg;
15075 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15076 +                                                       crypto_alg);
15077 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
15078 +
15079 +       return caam_init_common(ctx, &caam_alg->caam);
15080 +}
15081 +
15082 +static int caam_aead_init(struct crypto_aead *tfm)
15083 +{
15084 +       struct aead_alg *alg = crypto_aead_alg(tfm);
15085 +       struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15086 +                                                     aead);
15087 +       struct caam_ctx *ctx = crypto_aead_ctx(tfm);
15088 +
15089 +       return caam_init_common(ctx, &caam_alg->caam);
15090 +}
15091 +
15092 +static void caam_exit_common(struct caam_ctx *ctx)
15093 +{
15094 +       caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
15095 +       caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
15096 +       caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
15097 +
15098 +       dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
15099 +                        DMA_TO_DEVICE);
15100 +
15101 +       caam_jr_free(ctx->jrdev);
15102 +}
15103 +
15104 +static void caam_cra_exit(struct crypto_tfm *tfm)
15105 +{
15106 +       caam_exit_common(crypto_tfm_ctx(tfm));
15107 +}
15108 +
15109 +static void caam_aead_exit(struct crypto_aead *tfm)
15110 +{
15111 +       caam_exit_common(crypto_aead_ctx(tfm));
15112 +}
15113 +
15114 +static struct list_head alg_list;
15115 +static void __exit caam_qi_algapi_exit(void)
15116 +{
15117 +       struct caam_crypto_alg *t_alg, *n;
15118 +       int i;
15119 +
15120 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15121 +               struct caam_aead_alg *t_alg = driver_aeads + i;
15122 +
15123 +               if (t_alg->registered)
15124 +                       crypto_unregister_aead(&t_alg->aead);
15125 +       }
15126 +
15127 +       if (!alg_list.next)
15128 +               return;
15129 +
15130 +       list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
15131 +               crypto_unregister_alg(&t_alg->crypto_alg);
15132 +               list_del(&t_alg->entry);
15133 +               kfree(t_alg);
15134 +       }
15135 +}
15136 +
15137 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
15138 +                                             *template)
15139 +{
15140 +       struct caam_crypto_alg *t_alg;
15141 +       struct crypto_alg *alg;
15142 +
15143 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
15144 +       if (!t_alg)
15145 +               return ERR_PTR(-ENOMEM);
15146 +
15147 +       alg = &t_alg->crypto_alg;
15148 +
15149 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
15150 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
15151 +                template->driver_name);
15152 +       alg->cra_module = THIS_MODULE;
15153 +       alg->cra_init = caam_cra_init;
15154 +       alg->cra_exit = caam_cra_exit;
15155 +       alg->cra_priority = CAAM_CRA_PRIORITY;
15156 +       alg->cra_blocksize = template->blocksize;
15157 +       alg->cra_alignmask = 0;
15158 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
15159 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15160 +                        template->type;
15161 +       switch (template->type) {
15162 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
15163 +               alg->cra_type = &crypto_givcipher_type;
15164 +               alg->cra_ablkcipher = template->template_ablkcipher;
15165 +               break;
15166 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
15167 +               alg->cra_type = &crypto_ablkcipher_type;
15168 +               alg->cra_ablkcipher = template->template_ablkcipher;
15169 +               break;
15170 +       }
15171 +
15172 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
15173 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
15174 +
15175 +       return t_alg;
15176 +}
15177 +
15178 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15179 +{
15180 +       struct aead_alg *alg = &t_alg->aead;
15181 +
15182 +       alg->base.cra_module = THIS_MODULE;
15183 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
15184 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15185 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15186 +
15187 +       alg->init = caam_aead_init;
15188 +       alg->exit = caam_aead_exit;
15189 +}
15190 +
15191 +static int __init caam_qi_algapi_init(void)
15192 +{
15193 +       struct device_node *dev_node;
15194 +       struct platform_device *pdev;
15195 +       struct device *ctrldev;
15196 +       struct caam_drv_private *priv;
15197 +       int i = 0, err = 0;
15198 +       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15199 +       unsigned int md_limit = SHA512_DIGEST_SIZE;
15200 +       bool registered = false;
15201 +
15202 +       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15203 +       if (!dev_node) {
15204 +               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15205 +               if (!dev_node)
15206 +                       return -ENODEV;
15207 +       }
15208 +
15209 +       pdev = of_find_device_by_node(dev_node);
15210 +       of_node_put(dev_node);
15211 +       if (!pdev)
15212 +               return -ENODEV;
15213 +
15214 +       ctrldev = &pdev->dev;
15215 +       priv = dev_get_drvdata(ctrldev);
15216 +
15217 +       /*
15218 +        * If priv is NULL, it's probably because the caam driver wasn't
15219 +        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15220 +        */
15221 +       if (!priv || !priv->qi_present)
15222 +               return -ENODEV;
15223 +
15224 +       if (caam_dpaa2) {
15225 +               dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15226 +               return -ENODEV;
15227 +       }
15228 +
15229 +       INIT_LIST_HEAD(&alg_list);
15230 +
15231 +       /*
15232 +        * Register crypto algorithms the device supports.
15233 +        * First, detect presence and attributes of DES, AES, and MD blocks.
15234 +        */
15235 +       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15236 +       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15237 +       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15238 +       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15239 +       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15240 +
15241 +       /* If MD is present, limit digest size based on LP256 */
15242 +       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15243 +               md_limit = SHA256_DIGEST_SIZE;
15244 +
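+       /*
+        * des_inst/aes_inst/md_inst are per-CHA instantiation counts read
+        * from the perfmon block; a count of zero means the accelerator is
+        * absent, so the matching algorithms are skipped below.
+        */
+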
15245 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15246 +               struct caam_crypto_alg *t_alg;
15247 +               struct caam_alg_template *alg = driver_algs + i;
15248 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15249 +
15250 +               /* Skip DES algorithms if not supported by device */
15251 +               if (!des_inst &&
15252 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15253 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
15254 +                       continue;
15255 +
15256 +               /* Skip AES algorithms if not supported by device */
15257 +               if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15258 +                       continue;
15259 +
15260 +               t_alg = caam_alg_alloc(alg);
15261 +               if (IS_ERR(t_alg)) {
15262 +                       err = PTR_ERR(t_alg);
15263 +                       dev_warn(priv->qidev, "%s alg allocation failed\n",
15264 +                                alg->driver_name);
15265 +                       continue;
15266 +               }
15267 +
15268 +               err = crypto_register_alg(&t_alg->crypto_alg);
15269 +               if (err) {
15270 +                       dev_warn(priv->qidev, "%s alg registration failed\n",
15271 +                                t_alg->crypto_alg.cra_driver_name);
15272 +                       kfree(t_alg);
15273 +                       continue;
15274 +               }
15275 +
15276 +               list_add_tail(&t_alg->entry, &alg_list);
15277 +               registered = true;
15278 +       }
15279 +
15280 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15281 +               struct caam_aead_alg *t_alg = driver_aeads + i;
15282 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15283 +                                OP_ALG_ALGSEL_MASK;
15284 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15285 +                                OP_ALG_ALGSEL_MASK;
15286 +               u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15287 +
15288 +               /* Skip DES algorithms if not supported by device */
15289 +               if (!des_inst &&
15290 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15291 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15292 +                       continue;
15293 +
15294 +               /* Skip AES algorithms if not supported by device */
15295 +               if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15296 +                       continue;
15297 +
15298 +               /*
15299 +                * Check support for AES algorithms not available
15300 +                * on LP devices.
15301 +                */
15302 +               if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15303 +                   (alg_aai == OP_ALG_AAI_GCM))
15304 +                       continue;
15305 +
15306 +               /*
15307 +                * Skip algorithms requiring message digests
15308 +                * if MD or MD size is not supported by device.
15309 +                */
15310 +               if (c2_alg_sel &&
15311 +                   (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15312 +                       continue;
15313 +
15314 +               caam_aead_alg_init(t_alg);
15315 +
15316 +               err = crypto_register_aead(&t_alg->aead);
15317 +               if (err) {
15318 +                       pr_warn("%s alg registration failed\n",
15319 +                               t_alg->aead.base.cra_driver_name);
15320 +                       continue;
15321 +               }
15322 +
15323 +               t_alg->registered = true;
15324 +               registered = true;
15325 +       }
15326 +
15327 +       if (registered)
15328 +               dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15329 +
15330 +       return err;
15331 +}
15332 +
15333 +module_init(caam_qi_algapi_init);
15334 +module_exit(caam_qi_algapi_exit);
15335 +
15336 +MODULE_LICENSE("GPL");
15337 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15338 +MODULE_AUTHOR("Freescale Semiconductor");
15339 --- /dev/null
15340 +++ b/drivers/crypto/caam/caamalg_qi2.c
15341 @@ -0,0 +1,5920 @@
15342 +/*
15343 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15344 + * Copyright 2017 NXP
15345 + *
15346 + * Redistribution and use in source and binary forms, with or without
15347 + * modification, are permitted provided that the following conditions are met:
15348 + *     * Redistributions of source code must retain the above copyright
15349 + *      notice, this list of conditions and the following disclaimer.
15350 + *     * Redistributions in binary form must reproduce the above copyright
15351 + *      notice, this list of conditions and the following disclaimer in the
15352 + *      documentation and/or other materials provided with the distribution.
15353 + *     * Neither the names of the above-listed copyright holders nor the
15354 + *      names of any contributors may be used to endorse or promote products
15355 + *      derived from this software without specific prior written permission.
15356 + *
15357 + *
15358 + * ALTERNATIVELY, this software may be distributed under the terms of the
15359 + * GNU General Public License ("GPL") as published by the Free Software
15360 + * Foundation, either version 2 of that License or (at your option) any
15361 + * later version.
15362 + *
15363 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15364 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15365 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15366 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15367 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15368 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15369 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15370 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15371 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15372 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15373 + * POSSIBILITY OF SUCH DAMAGE.
15374 + */
15375 +
15376 +#include "compat.h"
15377 +#include "regs.h"
15378 +#include "caamalg_qi2.h"
15379 +#include "dpseci_cmd.h"
15380 +#include "desc_constr.h"
15381 +#include "error.h"
15382 +#include "sg_sw_sec4.h"
15383 +#include "sg_sw_qm2.h"
15384 +#include "key_gen.h"
15385 +#include "caamalg_desc.h"
15386 +#include "caamhash_desc.h"
15387 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15388 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15389 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15390 +
15391 +#define CAAM_CRA_PRIORITY      2000
15392 +
15393 +/* max key = AES_MAX_KEY_SIZE + RFC3686 nonce size + max split key size */
15394 +#define CAAM_MAX_KEY_SIZE      (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15395 +                                SHA512_DIGEST_SIZE * 2)
15396 +
15397 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15398 +bool caam_little_end;
15399 +EXPORT_SYMBOL(caam_little_end);
15400 +bool caam_imx;
15401 +EXPORT_SYMBOL(caam_imx);
15402 +#endif
15403 +
15404 +/*
15405 + * This is a cache of buffers from which users of the CAAM QI driver
15406 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15407 + * NOTE: A more elegant solution would be to have some headroom in the frames
15408 + *       being processed. This can be added by the dpaa2-eth driver. This would
15409 + *       pose a problem for userspace application processing which cannot
15410 + *       know of this limitation. So for now, this will work.
15411 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
15412 + */
15413 +static struct kmem_cache *qi_cache;
15414 +
15415 +struct caam_alg_entry {
15416 +       struct device *dev;
15417 +       int class1_alg_type;
15418 +       int class2_alg_type;
15419 +       bool rfc3686;
15420 +       bool geniv;
15421 +};
15422 +
15423 +struct caam_aead_alg {
15424 +       struct aead_alg aead;
15425 +       struct caam_alg_entry caam;
15426 +       bool registered;
15427 +};
15428 +
15429 +/**
15430 + * caam_ctx - per-session context
15431 + * @flc: Flow Contexts array
15432 + * @key:  virtual address of the key(s): [authentication key], encryption key
15433 + * @flc_dma: I/O virtual addresses of the Flow Contexts
15434 + * @key_dma: I/O virtual address of the key
15435 + * @dev: dpseci device
15436 + * @adata: authentication algorithm details
15437 + * @cdata: encryption algorithm details
15438 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15439 + */
15440 +struct caam_ctx {
15441 +       struct caam_flc flc[NUM_OP];
15442 +       u8 key[CAAM_MAX_KEY_SIZE];
15443 +       dma_addr_t flc_dma[NUM_OP];
15444 +       dma_addr_t key_dma;
15445 +       struct device *dev;
15446 +       struct alginfo adata;
15447 +       struct alginfo cdata;
15448 +       unsigned int authsize;
15449 +};
15450 +
15451 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15452 +                             dma_addr_t iova_addr)
15453 +{
15454 +       phys_addr_t phys_addr;
15455 +
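+       /*
+        * Without an IOMMU domain the IOVA is already a physical address,
+        * so the lookup degenerates to a plain phys_to_virt().
+        */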
15456 +       phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15457 +                                  iova_addr;
15458 +
15459 +       return phys_to_virt(phys_addr);
15460 +}
15461 +
15462 +/*
15463 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15464 + *
15465 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15466 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15467 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15468 + * hosting 16 SG entries.
15469 + *
15470 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15471 + *
15472 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15473 + */
15474 +static inline void *qi_cache_zalloc(gfp_t flags)
15475 +{
15476 +       return kmem_cache_zalloc(qi_cache, flags);
15477 +}
15478 +
15479 +/*
15480 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15481 + *
15482 + * @obj - buffer previously allocated by qi_cache_zalloc
15483 + *
15484 + * No checking is done; this is a pass-through call to
15485 + * kmem_cache_free(...)
15486 + */
15487 +static inline void qi_cache_free(void *obj)
15488 +{
15489 +       kmem_cache_free(qi_cache, obj);
15490 +}
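+
+/*
+ * Typical hotpath pairing (sketch, mirroring aead_edesc_alloc() below):
+ *
+ *	struct aead_edesc *edesc;
+ *
+ *	edesc = qi_cache_zalloc(GFP_DMA | flags);
+ *	if (unlikely(!edesc))
+ *		return ERR_PTR(-ENOMEM);
+ *	...
+ *	qi_cache_free(edesc);
+ */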
15491 +
15492 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15493 +{
15494 +       switch (crypto_tfm_alg_type(areq->tfm)) {
15495 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
15496 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
15497 +               return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15498 +       case CRYPTO_ALG_TYPE_AEAD:
15499 +               return aead_request_ctx(container_of(areq, struct aead_request,
15500 +                                                    base));
15501 +       case CRYPTO_ALG_TYPE_AHASH:
15502 +               return ahash_request_ctx(ahash_request_cast(areq));
15503 +       default:
15504 +               return ERR_PTR(-EINVAL);
15505 +       }
15506 +}
15507 +
15508 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15509 +                      struct scatterlist *dst, int src_nents,
15510 +                      int dst_nents, dma_addr_t iv_dma, int ivsize,
15511 +                      enum optype op_type, dma_addr_t qm_sg_dma,
15512 +                      int qm_sg_bytes)
15513 +{
15514 +       if (dst != src) {
15515 +               if (src_nents)
15516 +                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15517 +               dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15518 +       } else {
15519 +               dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15520 +       }
15521 +
15522 +       if (iv_dma)
15523 +               dma_unmap_single(dev, iv_dma, ivsize,
15524 +                                op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15525 +                                                        DMA_TO_DEVICE);
15526 +
15527 +       if (qm_sg_bytes)
15528 +               dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15529 +}
15530 +
15531 +static int aead_set_sh_desc(struct crypto_aead *aead)
15532 +{
15533 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15534 +                                                typeof(*alg), aead);
15535 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15536 +       unsigned int ivsize = crypto_aead_ivsize(aead);
15537 +       struct device *dev = ctx->dev;
15538 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
15539 +       struct caam_flc *flc;
15540 +       u32 *desc;
15541 +       u32 ctx1_iv_off = 0;
15542 +       u32 *nonce = NULL;
15543 +       unsigned int data_len[2];
15544 +       u32 inl_mask;
15545 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15546 +                              OP_ALG_AAI_CTR_MOD128);
15547 +       const bool is_rfc3686 = alg->caam.rfc3686;
15548 +
15549 +       if (!ctx->cdata.keylen || !ctx->authsize)
15550 +               return 0;
15551 +
15552 +       /*
15553 +        * AES-CTR needs to load IV in CONTEXT1 reg
15554 +        * at an offset of 128bits (16bytes)
15555 +        * CONTEXT1[255:128] = IV
15556 +        */
15557 +       if (ctr_mode)
15558 +               ctx1_iv_off = 16;
15559 +
15560 +       /*
15561 +        * RFC3686 specific:
15562 +        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15563 +        */
15564 +       if (is_rfc3686) {
15565 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15566 +               nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15567 +                               ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15568 +       }
15569 +
15570 +       data_len[0] = ctx->adata.keylen_pad;
15571 +       data_len[1] = ctx->cdata.keylen;
15572 +
15573 +       /* aead_encrypt shared descriptor */
15574 +       if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15575 +                                                DESC_QI_AEAD_ENC_LEN) +
15576 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15577 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15578 +                             ARRAY_SIZE(data_len)) < 0)
15579 +               return -EINVAL;
15580 +
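+       /*
+        * Bit 0 of inl_mask covers the authentication key (adata), bit 1 the
+        * encryption key (cdata): a set bit means the key fits inline in the
+        * shared descriptor, a clear one that it is referenced by DMA address.
+        */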
15581 +       if (inl_mask & 1)
15582 +               ctx->adata.key_virt = ctx->key;
15583 +       else
15584 +               ctx->adata.key_dma = ctx->key_dma;
15585 +
15586 +       if (inl_mask & 2)
15587 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15588 +       else
15589 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15590 +
15591 +       ctx->adata.key_inline = !!(inl_mask & 1);
15592 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15593 +
15594 +       flc = &ctx->flc[ENCRYPT];
15595 +       desc = flc->sh_desc;
15596 +
15597 +       if (alg->caam.geniv)
15598 +               cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15599 +                                         ivsize, ctx->authsize, is_rfc3686,
15600 +                                         nonce, ctx1_iv_off, true,
15601 +                                         priv->sec_attr.era);
15602 +       else
15603 +               cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15604 +                                      ivsize, ctx->authsize, is_rfc3686, nonce,
15605 +                                      ctx1_iv_off, true, priv->sec_attr.era);
15606 +
15607 +       flc->flc[1] = desc_len(desc); /* SDL */
15608 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
15609 +                                  sizeof(flc->flc) + desc_bytes(desc),
15610 +                                  DMA_BIDIRECTIONAL);
15611 +
15612 +       /* aead_decrypt shared descriptor */
15613 +       if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15614 +                             (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15615 +                             DESC_JOB_IO_LEN, data_len, &inl_mask,
15616 +                             ARRAY_SIZE(data_len)) < 0)
15617 +               return -EINVAL;
15618 +
15619 +       if (inl_mask & 1)
15620 +               ctx->adata.key_virt = ctx->key;
15621 +       else
15622 +               ctx->adata.key_dma = ctx->key_dma;
15623 +
15624 +       if (inl_mask & 2)
15625 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15626 +       else
15627 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15628 +
15629 +       ctx->adata.key_inline = !!(inl_mask & 1);
15630 +       ctx->cdata.key_inline = !!(inl_mask & 2);
15631 +
15632 +       flc = &ctx->flc[DECRYPT];
15633 +       desc = flc->sh_desc;
15634 +       cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15635 +                              ivsize, ctx->authsize, alg->caam.geniv,
15636 +                              is_rfc3686, nonce, ctx1_iv_off, true,
15637 +                              priv->sec_attr.era);
15638 +       flc->flc[1] = desc_len(desc); /* SDL */
15639 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
15640 +                                  sizeof(flc->flc) + desc_bytes(desc),
15641 +                                  DMA_BIDIRECTIONAL);
15642 +
15643 +       return 0;
15644 +}
15645 +
15646 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15647 +{
15648 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15649 +
15650 +       ctx->authsize = authsize;
15651 +       aead_set_sh_desc(authenc);
15652 +
15653 +       return 0;
15654 +}
15655 +
15656 +struct split_key_sh_result {
15657 +       struct completion completion;
15658 +       int err;
15659 +       struct device *dev;
15660 +};
15661 +
15662 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15663 +{
15664 +       struct split_key_sh_result *res = cbk_ctx;
15665 +
15666 +#ifdef DEBUG
15667 +       dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15668 +#endif
15669 +
15670 +       if (err)
15671 +               caam_qi2_strstatus(res->dev, err);
15672 +
15673 +       res->err = err;
15674 +       complete(&res->completion);
15675 +}
15676 +
15677 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15678 +                      unsigned int keylen)
15679 +{
15680 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15681 +       struct device *dev = ctx->dev;
15682 +       struct crypto_authenc_keys keys;
15683 +
15684 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15685 +               goto badkey;
15686 +
15687 +#ifdef DEBUG
15688 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15689 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
15690 +               keys.authkeylen);
15691 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15692 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15693 +#endif
15694 +
15695 +       ctx->adata.keylen = keys.authkeylen;
15696 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
15697 +                                             OP_ALG_ALGSEL_MASK);
15698 +
15699 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15700 +               goto badkey;
15701 +
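+       /*
+        * ctx->key layout: the authentication key at offset 0, padded out to
+        * adata.keylen_pad, immediately followed by the encryption key (and,
+        * for rfc3686, its trailing nonce).
+        */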
15702 +       memcpy(ctx->key, keys.authkey, keys.authkeylen);
15703 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15704 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
15705 +                                  keys.enckeylen, DMA_BIDIRECTIONAL);
15706 +#ifdef DEBUG
15707 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15708 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15709 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
15710 +#endif
15711 +
15712 +       ctx->cdata.keylen = keys.enckeylen;
15713 +
15714 +       return aead_set_sh_desc(aead);
15715 +badkey:
15716 +       crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15717 +       return -EINVAL;
15718 +}
15719 +
15720 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15721 +                                          bool encrypt)
15722 +{
15723 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
15724 +       struct caam_request *req_ctx = aead_request_ctx(req);
15725 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15726 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15727 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
15728 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15729 +                                                typeof(*alg), aead);
15730 +       struct device *dev = ctx->dev;
15731 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15732 +                     GFP_KERNEL : GFP_ATOMIC;
15733 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15734 +       struct aead_edesc *edesc;
15735 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15736 +       int ivsize = 0;
15737 +       unsigned int authsize = ctx->authsize;
15738 +       int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15739 +       int in_len, out_len;
15740 +       struct dpaa2_sg_entry *sg_table;
15741 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15742 +
15743 +       /* allocate space for base edesc and link tables */
15744 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15745 +       if (unlikely(!edesc)) {
15746 +               dev_err(dev, "could not allocate extended descriptor\n");
15747 +               return ERR_PTR(-ENOMEM);
15748 +       }
15749 +
15750 +       if (unlikely(req->dst != req->src)) {
15751 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15752 +                                            req->cryptlen);
15753 +               if (unlikely(src_nents < 0)) {
15754 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15755 +                               req->assoclen + req->cryptlen);
15756 +                       qi_cache_free(edesc);
15757 +                       return ERR_PTR(src_nents);
15758 +               }
15759 +
15760 +               dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15761 +                                            req->cryptlen +
15762 +                                            (encrypt ? authsize :
15763 +                                                       (-authsize)));
15764 +               if (unlikely(dst_nents < 0)) {
15765 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15766 +                               req->assoclen + req->cryptlen +
15767 +                               (encrypt ? authsize : (-authsize)));
15768 +                       qi_cache_free(edesc);
15769 +                       return ERR_PTR(dst_nents);
15770 +               }
15771 +
15772 +               if (src_nents) {
15773 +                       mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15774 +                                                     DMA_TO_DEVICE);
15775 +                       if (unlikely(!mapped_src_nents)) {
15776 +                               dev_err(dev, "unable to map source\n");
15777 +                               qi_cache_free(edesc);
15778 +                               return ERR_PTR(-ENOMEM);
15779 +                       }
15780 +               } else {
15781 +                       mapped_src_nents = 0;
15782 +               }
15783 +
15784 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15785 +                                             DMA_FROM_DEVICE);
15786 +               if (unlikely(!mapped_dst_nents)) {
15787 +                       dev_err(dev, "unable to map destination\n");
15788 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15789 +                       qi_cache_free(edesc);
15790 +                       return ERR_PTR(-ENOMEM);
15791 +               }
15792 +       } else {
15793 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15794 +                                            req->cryptlen +
15795 +                                               (encrypt ? authsize : 0));
15796 +               if (unlikely(src_nents < 0)) {
15797 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15798 +                               req->assoclen + req->cryptlen +
15799 +                               (encrypt ? authsize : 0));
15800 +                       qi_cache_free(edesc);
15801 +                       return ERR_PTR(src_nents);
15802 +               }
15803 +
15804 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15805 +                                             DMA_BIDIRECTIONAL);
15806 +               if (unlikely(!mapped_src_nents)) {
15807 +                       dev_err(dev, "unable to map source\n");
15808 +                       qi_cache_free(edesc);
15809 +                       return ERR_PTR(-ENOMEM);
15810 +               }
15811 +       }
15812 +
15813 +       if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15814 +               ivsize = crypto_aead_ivsize(aead);
15815 +               iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15816 +               if (dma_mapping_error(dev, iv_dma)) {
15817 +                       dev_err(dev, "unable to map IV\n");
15818 +                       caam_unmap(dev, req->src, req->dst, src_nents,
15819 +                                  dst_nents, 0, 0, op_type, 0, 0);
15820 +                       qi_cache_free(edesc);
15821 +                       return ERR_PTR(-ENOMEM);
15822 +               }
15823 +       }
15824 +
15825 +       /*
15826 +        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15827 +        * Input is not contiguous.
15828 +        */
15829 +       qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15830 +                     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15831 +       if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15832 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15833 +                       qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15834 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15835 +                          iv_dma, ivsize, op_type, 0, 0);
15836 +               qi_cache_free(edesc);
15837 +               return ERR_PTR(-ENOMEM);
15838 +       }
15839 +       sg_table = &edesc->sgt[0];
15840 +       qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15841 +
15842 +       edesc->src_nents = src_nents;
15843 +       edesc->dst_nents = dst_nents;
15844 +       edesc->iv_dma = iv_dma;
15845 +
15846 +       edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15847 +                                            DMA_TO_DEVICE);
15848 +       if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15849 +               dev_err(dev, "unable to map assoclen\n");
15850 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15851 +                          iv_dma, ivsize, op_type, 0, 0);
15852 +               qi_cache_free(edesc);
15853 +               return ERR_PTR(-ENOMEM);
15854 +       }
15855 +
15856 +       dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15857 +       qm_sg_index++;
15858 +       if (ivsize) {
15859 +               dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15860 +               qm_sg_index++;
15861 +       }
15862 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15863 +       qm_sg_index += mapped_src_nents;
15864 +
15865 +       if (mapped_dst_nents > 1)
15866 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15867 +                                qm_sg_index, 0);
15868 +
15869 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15870 +       if (dma_mapping_error(dev, qm_sg_dma)) {
15871 +               dev_err(dev, "unable to map S/G table\n");
15872 +               dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15873 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15874 +                          iv_dma, ivsize, op_type, 0, 0);
15875 +               qi_cache_free(edesc);
15876 +               return ERR_PTR(-ENOMEM);
15877 +       }
15878 +
15879 +       edesc->qm_sg_dma = qm_sg_dma;
15880 +       edesc->qm_sg_bytes = qm_sg_bytes;
15881 +
15882 +       out_len = req->assoclen + req->cryptlen +
15883 +                 (encrypt ? ctx->authsize : (-ctx->authsize));
15884 +       in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15885 +
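+       /*
+        * Populate the frame list: fd_flt[1] is the input entry, pointing at
+        * the S/G table (assoclen word, optional IV, then source data);
+        * fd_flt[0] is the output entry (destination, possibly an offset into
+        * the same table).
+        */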
15886 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15887 +       dpaa2_fl_set_final(in_fle, true);
15888 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15889 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15890 +       dpaa2_fl_set_len(in_fle, in_len);
15891 +
15892 +       if (req->dst == req->src) {
15893 +               if (mapped_src_nents == 1) {
15894 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15895 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15896 +               } else {
15897 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15898 +                       dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15899 +                                         (1 + !!ivsize) * sizeof(*sg_table));
15900 +               }
15901 +       } else if (mapped_dst_nents == 1) {
15902 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15903 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15904 +       } else {
15905 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15906 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15907 +                                 sizeof(*sg_table));
15908 +       }
15909 +
15910 +       dpaa2_fl_set_len(out_fle, out_len);
15911 +
15912 +       return edesc;
15913 +}
15914 +
15915 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15916 +                                        bool encrypt)
15917 +{
15918 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
15919 +       unsigned int blocksize = crypto_aead_blocksize(tls);
15920 +       unsigned int padsize, authsize;
15921 +       struct caam_request *req_ctx = aead_request_ctx(req);
15922 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15923 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15924 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
15925 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15926 +                                                typeof(*alg), aead);
15927 +       struct device *dev = ctx->dev;
15928 +       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15929 +                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15930 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15931 +       struct tls_edesc *edesc;
15932 +       dma_addr_t qm_sg_dma, iv_dma = 0;
15933 +       int ivsize = 0;
15934 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15935 +       int in_len, out_len;
15936 +       struct dpaa2_sg_entry *sg_table;
15937 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15938 +       struct scatterlist *dst;
15939 +
15940 +       if (encrypt) {
15941 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15942 +                                       blocksize);
15943 +               authsize = ctx->authsize + padsize;
15944 +       } else {
15945 +               authsize = ctx->authsize;
15946 +       }
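+       /*
+        * TLS 1.0 CBC padding always adds 1..blocksize bytes; e.g. for AES
+        * (blocksize 16) with cryptlen 32 and a 20-byte SHA-1 ICV:
+        * padsize = 16 - (52 % 16) = 12, so authsize grows to 32.
+        */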
15947 +
15948 +       /* allocate space for base edesc and link tables */
15949 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
15950 +       if (unlikely(!edesc)) {
15951 +               dev_err(dev, "could not allocate extended descriptor\n");
15952 +               return ERR_PTR(-ENOMEM);
15953 +       }
15954 +
15955 +       if (likely(req->src == req->dst)) {
15956 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15957 +                                            req->cryptlen +
15958 +                                            (encrypt ? authsize : 0));
15959 +               if (unlikely(src_nents < 0)) {
15960 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15961 +                               req->assoclen + req->cryptlen +
15962 +                               (encrypt ? authsize : 0));
15963 +                       qi_cache_free(edesc);
15964 +                       return ERR_PTR(src_nents);
15965 +               }
15966 +
15967 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15968 +                                             DMA_BIDIRECTIONAL);
15969 +               if (unlikely(!mapped_src_nents)) {
15970 +                       dev_err(dev, "unable to map source\n");
15971 +                       qi_cache_free(edesc);
15972 +                       return ERR_PTR(-ENOMEM);
15973 +               }
15974 +               dst = req->dst;
15975 +       } else {
15976 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
15977 +                                            req->cryptlen);
15978 +               if (unlikely(src_nents < 0)) {
15979 +                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15980 +                               req->assoclen + req->cryptlen);
15981 +                       qi_cache_free(edesc);
15982 +                       return ERR_PTR(src_nents);
15983 +               }
15984 +
15985 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15986 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
15987 +                                            (encrypt ? authsize : 0));
15988 +               if (unlikely(dst_nents < 0)) {
15989 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15990 +                               req->cryptlen +
15991 +                               (encrypt ? authsize : 0));
15992 +                       qi_cache_free(edesc);
15993 +                       return ERR_PTR(dst_nents);
15994 +               }
15995 +
15996 +               if (src_nents) {
15997 +                       mapped_src_nents = dma_map_sg(dev, req->src,
15998 +                                                     src_nents, DMA_TO_DEVICE);
15999 +                       if (unlikely(!mapped_src_nents)) {
16000 +                               dev_err(dev, "unable to map source\n");
16001 +                               qi_cache_free(edesc);
16002 +                               return ERR_PTR(-ENOMEM);
16003 +                       }
16004 +               } else {
16005 +                       mapped_src_nents = 0;
16006 +               }
16007 +
16008 +               mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16009 +                                             DMA_FROM_DEVICE);
16010 +               if (unlikely(!mapped_dst_nents)) {
16011 +                       dev_err(dev, "unable to map destination\n");
16012 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16013 +                       qi_cache_free(edesc);
16014 +                       return ERR_PTR(-ENOMEM);
16015 +               }
16016 +       }
16017 +
16018 +       ivsize = crypto_aead_ivsize(tls);
16019 +       iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16020 +       if (dma_mapping_error(dev, iv_dma)) {
16021 +               dev_err(dev, "unable to map IV\n");
16022 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16023 +                          op_type, 0, 0);
16024 +               qi_cache_free(edesc);
16025 +               return ERR_PTR(-ENOMEM);
16026 +       }
16027 +
16028 +       /*
16029 +        * Create S/G table: IV, then src, then dst (the latter only when
16030 +        * it is a distinct S/G list with more than one entry).
16031 +        */
16032 +       qm_sg_ents = 1 + mapped_src_nents +
16033 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16034 +       sg_table = &edesc->sgt[0];
16035 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16036 +
16037 +       edesc->src_nents = src_nents;
16038 +       edesc->dst_nents = dst_nents;
16039 +       edesc->dst = dst;
16040 +       edesc->iv_dma = iv_dma;
16041 +
16042 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16043 +       qm_sg_index = 1;
16044 +
16045 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16046 +       qm_sg_index += mapped_src_nents;
16047 +
16048 +       if (mapped_dst_nents > 1)
16049 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16050 +                                qm_sg_index, 0);
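+       /*
+        * Resulting qm_sg layout (illustration):
+        *
+        *      sg_table[0]                    = IV
+        *      sg_table[1..mapped_src_nents]  = source segments
+        *      sg_table[qm_sg_index..]        = destination segments (only
+        *                                       when dst is separate and has
+        *                                       more than one entry)
+        */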
16051 +
16052 +       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16053 +       if (dma_mapping_error(dev, qm_sg_dma)) {
16054 +               dev_err(dev, "unable to map S/G table\n");
16055 +               caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16056 +                          ivsize, op_type, 0, 0);
16057 +               qi_cache_free(edesc);
16058 +               return ERR_PTR(-ENOMEM);
16059 +       }
16060 +
16061 +       edesc->qm_sg_dma = qm_sg_dma;
16062 +       edesc->qm_sg_bytes = qm_sg_bytes;
16063 +
16064 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
16065 +       in_len = ivsize + req->assoclen + req->cryptlen;
16066 +
16067 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16068 +       dpaa2_fl_set_final(in_fle, true);
16069 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16070 +       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16071 +       dpaa2_fl_set_len(in_fle, in_len);
16072 +
16073 +       if (req->dst == req->src) {
16074 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16075 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16076 +                                 (sg_nents_for_len(req->src, req->assoclen) +
16077 +                                  1) * sizeof(*sg_table));
16078 +       } else if (mapped_dst_nents == 1) {
16079 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16080 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16081 +       } else {
16082 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16083 +               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16084 +                                 sizeof(*sg_table));
16085 +       }
16086 +
16087 +       dpaa2_fl_set_len(out_fle, out_len);
16088 +
16089 +       return edesc;
16090 +}
16091 +
16092 +static int tls_set_sh_desc(struct crypto_aead *tls)
16093 +{
16094 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16095 +       unsigned int ivsize = crypto_aead_ivsize(tls);
16096 +       unsigned int blocksize = crypto_aead_blocksize(tls);
16097 +       struct device *dev = ctx->dev;
16098 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
16099 +       struct caam_flc *flc;
16100 +       u32 *desc;
16101 +       unsigned int assoclen = 13; /* always 13 bytes for TLS */
16102 +       unsigned int data_len[2];
16103 +       u32 inl_mask;
16104 +
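+       /*
+        * The 13-byte assoclen above is the TLS 1.0 pseudo-header covered by
+        * the MAC. Illustrative layout (not a structure used by this driver):
+        *
+        *      struct tls10_aad {
+        *              __be64 seq_num;  // implicit record sequence number
+        *              u8     type;     // ContentType
+        *              __be16 version;  // ProtocolVersion, e.g. 0x0301
+        *              __be16 length;   // length of the plaintext fragment
+        *      } __packed;              // 8 + 1 + 2 + 2 = 13 bytes
+        */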
16105 +       if (!ctx->cdata.keylen || !ctx->authsize)
16106 +               return 0;
16107 +
16108 +       /*
16109 +        * TLS 1.0 encrypt shared descriptor
16110 +        * Job Descriptor and Shared Descriptor
16111 +        * must fit into the 64-word Descriptor h/w Buffer
16112 +        */
16113 +       data_len[0] = ctx->adata.keylen_pad;
16114 +       data_len[1] = ctx->cdata.keylen;
16115 +
16116 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16117 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
16118 +               return -EINVAL;
16119 +
16120 +       if (inl_mask & 1)
16121 +               ctx->adata.key_virt = ctx->key;
16122 +       else
16123 +               ctx->adata.key_dma = ctx->key_dma;
16124 +
16125 +       if (inl_mask & 2)
16126 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16127 +       else
16128 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16129 +
16130 +       ctx->adata.key_inline = !!(inl_mask & 1);
16131 +       ctx->cdata.key_inline = !!(inl_mask & 2);
16132 +
16133 +       flc = &ctx->flc[ENCRYPT];
16134 +       desc = flc->sh_desc;
16135 +       cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16136 +                             assoclen, ivsize, ctx->authsize, blocksize,
16137 +                             priv->sec_attr.era);
16138 +       flc->flc[1] = desc_len(desc); /* SDL */
16139 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16140 +                                  sizeof(flc->flc) + desc_bytes(desc),
16141 +                                  DMA_BIDIRECTIONAL);
16142 +
16143 +       /*
16144 +        * TLS 1.0 decrypt shared descriptor
16145 +        * Keys do not fit inline, regardless of algorithms used
16146 +        */
16147 +       ctx->adata.key_inline = false;
16148 +       ctx->adata.key_dma = ctx->key_dma;
16149 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16150 +
16151 +       flc = &ctx->flc[DECRYPT];
16152 +       desc = flc->sh_desc;
16153 +       cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16154 +                             ctx->authsize, blocksize, priv->sec_attr.era);
16155 +       flc->flc[1] = desc_len(desc); /* SDL */
16156 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16157 +                                  sizeof(flc->flc) + desc_bytes(desc),
16158 +                                  DMA_BIDIRECTIONAL);
16159 +
16160 +       return 0;
16161 +}
16162 +
16163 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16164 +                     unsigned int keylen)
16165 +{
16166 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16167 +       struct device *dev = ctx->dev;
16168 +       struct crypto_authenc_keys keys;
16169 +
16170 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16171 +               goto badkey;
16172 +
16173 +#ifdef DEBUG
16174 +       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16175 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
16176 +               keys.authkeylen);
16177 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16178 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16179 +#endif
16180 +
16181 +       ctx->adata.keylen = keys.authkeylen;
16182 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
16183 +                                             OP_ALG_ALGSEL_MASK);
16184 +
16185 +       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16186 +               goto badkey;
16187 +
16188 +       memcpy(ctx->key, keys.authkey, keys.authkeylen);
16189 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16190 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
16191 +                                  keys.enckeylen, DMA_BIDIRECTIONAL);
16192 +#ifdef DEBUG
16193 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16194 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16195 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
16196 +#endif
16197 +
16198 +       ctx->cdata.keylen = keys.enckeylen;
16199 +
16200 +       return tls_set_sh_desc(tls);
16201 +badkey:
16202 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16203 +       return -EINVAL;
16204 +}
16205 +
16206 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16207 +{
16208 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
16209 +
16210 +       ctx->authsize = authsize;
16211 +       tls_set_sh_desc(tls);
16212 +
16213 +       return 0;
16214 +}
16215 +
16216 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16217 +{
16218 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16219 +       struct device *dev = ctx->dev;
16220 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16221 +       struct caam_flc *flc;
16222 +       u32 *desc;
16223 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16224 +                       ctx->cdata.keylen;
16225 +
16226 +       if (!ctx->cdata.keylen || !ctx->authsize)
16227 +               return 0;
16228 +
16229 +       /*
16230 +        * AES GCM encrypt shared descriptor
16231 +        * Job Descriptor and Shared Descriptor
16232 +        * must fit into the 64-word Descriptor h/w Buffer
16233 +        */
16234 +       if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16235 +               ctx->cdata.key_inline = true;
16236 +               ctx->cdata.key_virt = ctx->key;
16237 +       } else {
16238 +               ctx->cdata.key_inline = false;
16239 +               ctx->cdata.key_dma = ctx->key_dma;
16240 +       }
16241 +
16242 +       flc = &ctx->flc[ENCRYPT];
16243 +       desc = flc->sh_desc;
16244 +       cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16245 +       flc->flc[1] = desc_len(desc); /* SDL */
16246 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16247 +                                  sizeof(flc->flc) + desc_bytes(desc),
16248 +                                  DMA_BIDIRECTIONAL);
16249 +
16250 +       /*
16251 +        * Job Descriptor and Shared Descriptors
16252 +        * must all fit into the 64-word Descriptor h/w Buffer
16253 +        */
16254 +       if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16255 +               ctx->cdata.key_inline = true;
16256 +               ctx->cdata.key_virt = ctx->key;
16257 +       } else {
16258 +               ctx->cdata.key_inline = false;
16259 +               ctx->cdata.key_dma = ctx->key_dma;
16260 +       }
16261 +
16262 +       flc = &ctx->flc[DECRYPT];
16263 +       desc = flc->sh_desc;
16264 +       cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16265 +       flc->flc[1] = desc_len(desc); /* SDL */
16266 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16267 +                                  sizeof(flc->flc) + desc_bytes(desc),
16268 +                                  DMA_BIDIRECTIONAL);
16269 +
16270 +       return 0;
16271 +}
16272 +
16273 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16274 +{
16275 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16276 +
16277 +       ctx->authsize = authsize;
16278 +       gcm_set_sh_desc(authenc);
16279 +
16280 +       return 0;
16281 +}
16282 +
16283 +static int gcm_setkey(struct crypto_aead *aead,
16284 +                     const u8 *key, unsigned int keylen)
16285 +{
16286 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16287 +       struct device *dev = ctx->dev;
16288 +
16289 +#ifdef DEBUG
16290 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16291 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16292 +#endif
16293 +
16294 +       memcpy(ctx->key, key, keylen);
16295 +       dma_sync_single_for_device(dev, ctx->key_dma, keylen,
16296 +                                  DMA_BIDIRECTIONAL);
16297 +       ctx->cdata.keylen = keylen;
16298 +
16299 +       return gcm_set_sh_desc(aead);
16300 +}
16301 +
16302 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16303 +{
16304 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16305 +       struct device *dev = ctx->dev;
16306 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16307 +       struct caam_flc *flc;
16308 +       u32 *desc;
16309 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16310 +                       ctx->cdata.keylen;
16311 +
16312 +       if (!ctx->cdata.keylen || !ctx->authsize)
16313 +               return 0;
16314 +
16315 +       ctx->cdata.key_virt = ctx->key;
16316 +
16317 +       /*
16318 +        * RFC4106 encrypt shared descriptor
16319 +        * Job Descriptor and Shared Descriptor
16320 +        * must fit into the 64-word Descriptor h/w Buffer
16321 +        */
16322 +       if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16323 +               ctx->cdata.key_inline = true;
16324 +       } else {
16325 +               ctx->cdata.key_inline = false;
16326 +               ctx->cdata.key_dma = ctx->key_dma;
16327 +       }
16328 +
16329 +       flc = &ctx->flc[ENCRYPT];
16330 +       desc = flc->sh_desc;
16331 +       cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16332 +                                 true);
16333 +       flc->flc[1] = desc_len(desc); /* SDL */
16334 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16335 +                                  sizeof(flc->flc) + desc_bytes(desc),
16336 +                                  DMA_BIDIRECTIONAL);
16337 +
16338 +       /*
16339 +        * Job Descriptor and Shared Descriptors
16340 +        * must all fit into the 64-word Descriptor h/w Buffer
16341 +        */
16342 +       if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16343 +               ctx->cdata.key_inline = true;
16344 +       } else {
16345 +               ctx->cdata.key_inline = false;
16346 +               ctx->cdata.key_dma = ctx->key_dma;
16347 +       }
16348 +
16349 +       flc = &ctx->flc[DECRYPT];
16350 +       desc = flc->sh_desc;
16351 +       cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16352 +                                 true);
16353 +       flc->flc[1] = desc_len(desc); /* SDL */
16354 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16355 +                                  sizeof(flc->flc) + desc_bytes(desc),
16356 +                                  DMA_BIDIRECTIONAL);
16357 +
16358 +       return 0;
16359 +}
16360 +
16361 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16362 +                              unsigned int authsize)
16363 +{
16364 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16365 +
16366 +       ctx->authsize = authsize;
16367 +       rfc4106_set_sh_desc(authenc);
16368 +
16369 +       return 0;
16370 +}
16371 +
16372 +static int rfc4106_setkey(struct crypto_aead *aead,
16373 +                         const u8 *key, unsigned int keylen)
16374 +{
16375 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16376 +       struct device *dev = ctx->dev;
16377 +
16378 +       if (keylen < 4)
16379 +               return -EINVAL;
16380 +
16381 +#ifdef DEBUG
16382 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16383 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16384 +#endif
16385 +
16386 +       memcpy(ctx->key, key, keylen);
16387 +       /*
16388 +        * The last four bytes of the key material are used as the salt value
16389 +        * in the nonce. Update the AES key length.
16390 +        */
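+       /*
+        * Key split sketch (illustration only, not driver code): the rfc4106
+        * key blob is {AES key || 4-byte salt}, e.g. keylen = 20 carries an
+        * AES-128 key in bytes 0..15 and the salt in bytes 16..19:
+        *
+        *      memcpy(aes_key, key, keylen - 4);
+        *      memcpy(salt, key + keylen - 4, 4);
+        */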
16391 +       ctx->cdata.keylen = keylen - 4;
16392 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16393 +                                  DMA_BIDIRECTIONAL);
16394 +
16395 +       return rfc4106_set_sh_desc(aead);
16396 +}
16397 +
16398 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16399 +{
16400 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16401 +       struct device *dev = ctx->dev;
16402 +       unsigned int ivsize = crypto_aead_ivsize(aead);
16403 +       struct caam_flc *flc;
16404 +       u32 *desc;
16405 +       int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16406 +                       ctx->cdata.keylen;
16407 +
16408 +       if (!ctx->cdata.keylen || !ctx->authsize)
16409 +               return 0;
16410 +
16411 +       ctx->cdata.key_virt = ctx->key;
16412 +
16413 +       /*
16414 +        * RFC4543 encrypt shared descriptor
16415 +        * Job Descriptor and Shared Descriptor
16416 +        * must fit into the 64-word Descriptor h/w Buffer
16417 +        */
16418 +       if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16419 +               ctx->cdata.key_inline = true;
16420 +       } else {
16421 +               ctx->cdata.key_inline = false;
16422 +               ctx->cdata.key_dma = ctx->key_dma;
16423 +       }
16424 +
16425 +       flc = &ctx->flc[ENCRYPT];
16426 +       desc = flc->sh_desc;
16427 +       cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16428 +                                 true);
16429 +       flc->flc[1] = desc_len(desc); /* SDL */
16430 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16431 +                                  sizeof(flc->flc) + desc_bytes(desc),
16432 +                                  DMA_BIDIRECTIONAL);
16433 +
16434 +       /*
16435 +        * Job Descriptor and Shared Descriptors
16436 +        * must all fit into the 64-word Descriptor h/w Buffer
16437 +        */
16438 +       if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16439 +               ctx->cdata.key_inline = true;
16440 +       } else {
16441 +               ctx->cdata.key_inline = false;
16442 +               ctx->cdata.key_dma = ctx->key_dma;
16443 +       }
16444 +
16445 +       flc = &ctx->flc[DECRYPT];
16446 +       desc = flc->sh_desc;
16447 +       cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16448 +                                 true);
16449 +       flc->flc[1] = desc_len(desc); /* SDL */
16450 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16451 +                                  sizeof(flc->flc) + desc_bytes(desc),
16452 +                                  DMA_BIDIRECTIONAL);
16453 +
16454 +       return 0;
16455 +}
16456 +
16457 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16458 +                              unsigned int authsize)
16459 +{
16460 +       struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16461 +
16462 +       ctx->authsize = authsize;
16463 +       rfc4543_set_sh_desc(authenc);
16464 +
16465 +       return 0;
16466 +}
16467 +
16468 +static int rfc4543_setkey(struct crypto_aead *aead,
16469 +                         const u8 *key, unsigned int keylen)
16470 +{
16471 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16472 +       struct device *dev = ctx->dev;
16473 +
16474 +       if (keylen < 4)
16475 +               return -EINVAL;
16476 +
16477 +#ifdef DEBUG
16478 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16479 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16480 +#endif
16481 +
16482 +       memcpy(ctx->key, key, keylen);
16483 +       /*
16484 +        * The last four bytes of the key material are used as the salt value
16485 +        * in the nonce. Update the AES key length.
16486 +        */
16487 +       ctx->cdata.keylen = keylen - 4;
16488 +       dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16489 +                                  DMA_BIDIRECTIONAL);
16490 +
16491 +       return rfc4543_set_sh_desc(aead);
16492 +}
16493 +
16494 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16495 +                            const u8 *key, unsigned int keylen)
16496 +{
16497 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16498 +       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16499 +       const char *alg_name = crypto_tfm_alg_name(tfm);
16500 +       struct device *dev = ctx->dev;
16501 +       struct caam_flc *flc;
16502 +       unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16503 +       u32 *desc;
16504 +       u32 ctx1_iv_off = 0;
16505 +       const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16506 +                              OP_ALG_AAI_CTR_MOD128);
16507 +       const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16508 +
16509 +#ifdef DEBUG
16510 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16511 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16512 +#endif
16513 +       /*
16514 +        * AES-CTR needs to load the IV into the CONTEXT1 register
16515 +        * at an offset of 128 bits (16 bytes):
16516 +        * CONTEXT1[255:128] = IV
16517 +        */
16518 +       if (ctr_mode)
16519 +               ctx1_iv_off = 16;
16520 +
16521 +       /*
16522 +        * RFC3686 specific:
16523 +        *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16524 +        *      | *key = {KEY, NONCE}
16525 +        */
16526 +       if (is_rfc3686) {
16527 +               ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16528 +               keylen -= CTR_RFC3686_NONCE_SIZE;
16529 +       }
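+       /*
+        * Resulting CONTEXT1 usage (illustration, assuming
+        * CTR_RFC3686_NONCE_SIZE == 4):
+        *
+        *      plain ctr(aes):    CONTEXT1[255:128] = IV
+        *                         -> ctx1_iv_off = 16
+        *      rfc3686(ctr(aes)): CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+        *                         -> ctx1_iv_off = 16 + 4 = 20
+        */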
16530 +
16531 +       ctx->cdata.keylen = keylen;
16532 +       ctx->cdata.key_virt = key;
16533 +       ctx->cdata.key_inline = true;
16534 +
16535 +       /* ablkcipher_encrypt shared descriptor */
16536 +       flc = &ctx->flc[ENCRYPT];
16537 +       desc = flc->sh_desc;
16538 +       cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16539 +                                    is_rfc3686, ctx1_iv_off);
16540 +       flc->flc[1] = desc_len(desc); /* SDL */
16541 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16542 +                                  sizeof(flc->flc) + desc_bytes(desc),
16543 +                                  DMA_BIDIRECTIONAL);
16544 +
16545 +       /* ablkcipher_decrypt shared descriptor */
16546 +       flc = &ctx->flc[DECRYPT];
16547 +       desc = flc->sh_desc;
16548 +       cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16549 +                                    is_rfc3686, ctx1_iv_off);
16550 +       flc->flc[1] = desc_len(desc); /* SDL */
16551 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16552 +                                  sizeof(flc->flc) + desc_bytes(desc),
16553 +                                  DMA_BIDIRECTIONAL);
16554 +
16555 +       /* ablkcipher_givencrypt shared descriptor */
16556 +       flc = &ctx->flc[GIVENCRYPT];
16557 +       desc = flc->sh_desc;
16558 +       cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16559 +                                       ivsize, is_rfc3686, ctx1_iv_off);
16560 +       flc->flc[1] = desc_len(desc); /* SDL */
16561 +       dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
16562 +                                  sizeof(flc->flc) + desc_bytes(desc),
16563 +                                  DMA_BIDIRECTIONAL);
16564 +
16565 +       return 0;
16566 +}
16567 +
16568 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16569 +                                const u8 *key, unsigned int keylen)
16570 +{
16571 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16572 +       struct device *dev = ctx->dev;
16573 +       struct caam_flc *flc;
16574 +       u32 *desc;
16575 +
16576 +       if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16577 +               dev_err(dev, "key size mismatch\n");
16578 +               crypto_ablkcipher_set_flags(ablkcipher,
16579 +                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
16580 +               return -EINVAL;
16581 +       }
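+       /*
+        * XTS takes two concatenated AES keys of equal size, so only
+        * 2 * 16 = 32 bytes (XTS-AES-128) or 2 * 32 = 64 bytes (XTS-AES-256)
+        * are accepted here; XTS with AES-192 is not defined.
+        */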
16582 +
16583 +       ctx->cdata.keylen = keylen;
16584 +       ctx->cdata.key_virt = key;
16585 +       ctx->cdata.key_inline = true;
16586 +
16587 +       /* xts_ablkcipher_encrypt shared descriptor */
16588 +       flc = &ctx->flc[ENCRYPT];
16589 +       desc = flc->sh_desc;
16590 +       cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16591 +       flc->flc[1] = desc_len(desc); /* SDL */
16592 +       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16593 +                                  sizeof(flc->flc) + desc_bytes(desc),
16594 +                                  DMA_BIDIRECTIONAL);
16595 +
16596 +       /* xts_ablkcipher_decrypt shared descriptor */
16597 +       flc = &ctx->flc[DECRYPT];
16598 +       desc = flc->sh_desc;
16599 +       cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16600 +       flc->flc[1] = desc_len(desc); /* SDL */
16601 +       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16602 +                                  sizeof(flc->flc) + desc_bytes(desc),
16603 +                                  DMA_BIDIRECTIONAL);
16604 +
16605 +       return 0;
16606 +}
16607 +
16608 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16609 +                                                      *req, bool encrypt)
16610 +{
16611 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16612 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16613 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16614 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16615 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16616 +       struct device *dev = ctx->dev;
16617 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16618 +                      GFP_KERNEL : GFP_ATOMIC;
16619 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16620 +       struct ablkcipher_edesc *edesc;
16621 +       dma_addr_t iv_dma;
16622 +       bool in_contig;
16623 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16624 +       int dst_sg_idx, qm_sg_ents;
16625 +       struct dpaa2_sg_entry *sg_table;
16626 +       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16627 +
16628 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16629 +       if (unlikely(src_nents < 0)) {
16630 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16631 +                       req->nbytes);
16632 +               return ERR_PTR(src_nents);
16633 +       }
16634 +
16635 +       if (unlikely(req->dst != req->src)) {
16636 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16637 +               if (unlikely(dst_nents < 0)) {
16638 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16639 +                               req->nbytes);
16640 +                       return ERR_PTR(dst_nents);
16641 +               }
16642 +
16643 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16644 +                                             DMA_TO_DEVICE);
16645 +               if (unlikely(!mapped_src_nents)) {
16646 +                       dev_err(dev, "unable to map source\n");
16647 +                       return ERR_PTR(-ENOMEM);
16648 +               }
16649 +
16650 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16651 +                                             DMA_FROM_DEVICE);
16652 +               if (unlikely(!mapped_dst_nents)) {
16653 +                       dev_err(dev, "unable to map destination\n");
16654 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16655 +                       return ERR_PTR(-ENOMEM);
16656 +               }
16657 +       } else {
16658 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16659 +                                             DMA_BIDIRECTIONAL);
16660 +               if (unlikely(!mapped_src_nents)) {
16661 +                       dev_err(dev, "unable to map source\n");
16662 +                       return ERR_PTR(-ENOMEM);
16663 +               }
16664 +       }
16665 +
16666 +       iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16667 +       if (dma_mapping_error(dev, iv_dma)) {
16668 +               dev_err(dev, "unable to map IV\n");
16669 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16670 +                          0, 0, 0, 0);
16671 +               return ERR_PTR(-ENOMEM);
16672 +       }
16673 +
16674 +       if (mapped_src_nents == 1 &&
16675 +           iv_dma + ivsize == sg_dma_address(req->src)) {
16676 +               in_contig = true;
16677 +               qm_sg_ents = 0;
16678 +       } else {
16679 +               in_contig = false;
16680 +               qm_sg_ents = 1 + mapped_src_nents;
16681 +       }
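+       /*
+        * in_contig illustration: the DMA-mapped IV immediately precedes a
+        * single source segment,
+        *
+        *      [ IV (ivsize) ][ src (req->nbytes) ]
+        *      ^iv_dma        ^sg_dma_address(req->src)
+        *
+        * so the whole input is one contiguous buffer and needs no S/G table
+        * entries of its own.
+        */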
16682 +       dst_sg_idx = qm_sg_ents;
16683 +
16684 +       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16685 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16686 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16687 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16688 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16689 +                          iv_dma, ivsize, op_type, 0, 0);
16690 +               return ERR_PTR(-ENOMEM);
16691 +       }
16692 +
16693 +       /* allocate space for base edesc and link tables */
16694 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16695 +       if (unlikely(!edesc)) {
16696 +               dev_err(dev, "could not allocate extended descriptor\n");
16697 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16698 +                          iv_dma, ivsize, op_type, 0, 0);
16699 +               return ERR_PTR(-ENOMEM);
16700 +       }
16701 +
16702 +       edesc->src_nents = src_nents;
16703 +       edesc->dst_nents = dst_nents;
16704 +       edesc->iv_dma = iv_dma;
16705 +       sg_table = &edesc->sgt[0];
16706 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16707 +
16708 +       if (!in_contig) {
16709 +               dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16710 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16711 +       }
16712 +
16713 +       if (mapped_dst_nents > 1)
16714 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16715 +                                dst_sg_idx, 0);
16716 +
16717 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16718 +                                         DMA_TO_DEVICE);
16719 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16720 +               dev_err(dev, "unable to map S/G table\n");
16721 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16722 +                          iv_dma, ivsize, op_type, 0, 0);
16723 +               qi_cache_free(edesc);
16724 +               return ERR_PTR(-ENOMEM);
16725 +       }
16726 +
16727 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16728 +       dpaa2_fl_set_final(in_fle, true);
16729 +       dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16730 +       dpaa2_fl_set_len(out_fle, req->nbytes);
16731 +
16732 +       if (!in_contig) {
16733 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16734 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16735 +       } else {
16736 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16737 +               dpaa2_fl_set_addr(in_fle, iv_dma);
16738 +       }
16739 +
16740 +       if (req->src == req->dst) {
16741 +               if (!in_contig) {
16742 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16743 +                       dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16744 +                                         sizeof(*sg_table));
16745 +               } else {
16746 +                       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16747 +                       dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16748 +               }
16749 +       } else if (mapped_dst_nents > 1) {
16750 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16751 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16752 +                                 sizeof(*sg_table));
16753 +       } else {
16754 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16755 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16756 +       }
16757 +
16758 +       return edesc;
16759 +}
16760 +
16761 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16762 +       struct skcipher_givcrypt_request *greq)
16763 +{
16764 +       struct ablkcipher_request *req = &greq->creq;
16765 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16766 +       struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16767 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16768 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16769 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16770 +       struct device *dev = ctx->dev;
16771 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16772 +                      GFP_KERNEL : GFP_ATOMIC;
16773 +       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16774 +       struct ablkcipher_edesc *edesc;
16775 +       dma_addr_t iv_dma;
16776 +       bool out_contig;
16777 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16778 +       struct dpaa2_sg_entry *sg_table;
16779 +       int dst_sg_idx, qm_sg_ents;
16780 +
16781 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
16782 +       if (unlikely(src_nents < 0)) {
16783 +               dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16784 +                       req->nbytes);
16785 +               return ERR_PTR(src_nents);
16786 +       }
16787 +
16788 +       if (unlikely(req->dst != req->src)) {
16789 +               dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16790 +               if (unlikely(dst_nents < 0)) {
16791 +                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16792 +                               req->nbytes);
16793 +                       return ERR_PTR(dst_nents);
16794 +               }
16795 +
16796 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16797 +                                             DMA_TO_DEVICE);
16798 +               if (unlikely(!mapped_src_nents)) {
16799 +                       dev_err(dev, "unable to map source\n");
16800 +                       return ERR_PTR(-ENOMEM);
16801 +               }
16802 +
16803 +               mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16804 +                                             DMA_FROM_DEVICE);
16805 +               if (unlikely(!mapped_dst_nents)) {
16806 +                       dev_err(dev, "unable to map destination\n");
16807 +                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16808 +                       return ERR_PTR(-ENOMEM);
16809 +               }
16810 +       } else {
16811 +               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16812 +                                             DMA_BIDIRECTIONAL);
16813 +               if (unlikely(!mapped_src_nents)) {
16814 +                       dev_err(dev, "unable to map source\n");
16815 +                       return ERR_PTR(-ENOMEM);
16816 +               }
16817 +
16818 +               dst_nents = src_nents;
16819 +               mapped_dst_nents = src_nents;
16820 +       }
16821 +
16822 +       iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16823 +       if (dma_mapping_error(dev, iv_dma)) {
16824 +               dev_err(dev, "unable to map IV\n");
16825 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16826 +                          0, 0, 0, 0);
16827 +               return ERR_PTR(-ENOMEM);
16828 +       }
16829 +
16830 +       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16831 +       dst_sg_idx = qm_sg_ents;
16832 +       if (mapped_dst_nents == 1 &&
16833 +           iv_dma + ivsize == sg_dma_address(req->dst)) {
16834 +               out_contig = true;
16835 +       } else {
16836 +               out_contig = false;
16837 +               qm_sg_ents += 1 + mapped_dst_nents;
16838 +       }
16839 +
16840 +       if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16841 +               dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16842 +                       qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16843 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16844 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16845 +               return ERR_PTR(-ENOMEM);
16846 +       }
16847 +
16848 +       /* allocate space for base edesc and link tables */
16849 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
16850 +       if (!edesc) {
16851 +               dev_err(dev, "could not allocate extended descriptor\n");
16852 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16853 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16854 +               return ERR_PTR(-ENOMEM);
16855 +       }
16856 +
16857 +       edesc->src_nents = src_nents;
16858 +       edesc->dst_nents = dst_nents;
16859 +       edesc->iv_dma = iv_dma;
16860 +       sg_table = &edesc->sgt[0];
16861 +       edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16862 +
16863 +       if (mapped_src_nents > 1)
16864 +               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16865 +
16866 +       if (!out_contig) {
16867 +               dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16868 +               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16869 +                                dst_sg_idx + 1, 0);
16870 +       }
16871 +
16872 +       edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16873 +                                         DMA_TO_DEVICE);
16874 +       if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16875 +               dev_err(dev, "unable to map S/G table\n");
16876 +               caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16877 +                          iv_dma, ivsize, GIVENCRYPT, 0, 0);
16878 +               qi_cache_free(edesc);
16879 +               return ERR_PTR(-ENOMEM);
16880 +       }
16881 +
16882 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16883 +       dpaa2_fl_set_final(in_fle, true);
16884 +       dpaa2_fl_set_len(in_fle, req->nbytes);
16885 +       dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16886 +
16887 +       if (mapped_src_nents > 1) {
16888 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16889 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16890 +       } else {
16891 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16892 +               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16893 +       }
16894 +
16895 +       if (!out_contig) {
16896 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16897 +               dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16898 +                                 sizeof(*sg_table));
16899 +       } else {
16900 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16901 +               dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16902 +       }
16903 +
16904 +       return edesc;
16905 +}
16906 +
16907 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16908 +                      struct aead_request *req)
16909 +{
16910 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
16911 +       int ivsize = crypto_aead_ivsize(aead);
16912 +       struct caam_request *caam_req = aead_request_ctx(req);
16913 +
16914 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16915 +                  edesc->iv_dma, ivsize, caam_req->op_type,
16916 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
16917 +       dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16918 +}
16919 +
16920 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16921 +                     struct aead_request *req)
16922 +{
16923 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
16924 +       int ivsize = crypto_aead_ivsize(tls);
16925 +       struct caam_request *caam_req = aead_request_ctx(req);
16926 +
16927 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16928 +                  edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
16929 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
16930 +}
16931 +
16932 +static void ablkcipher_unmap(struct device *dev,
16933 +                            struct ablkcipher_edesc *edesc,
16934 +                            struct ablkcipher_request *req)
16935 +{
16936 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16937 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16938 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
16939 +
16940 +       caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16941 +                  edesc->iv_dma, ivsize, caam_req->op_type,
16942 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
16943 +}
16944 +
16945 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
16946 +{
16947 +       struct crypto_async_request *areq = cbk_ctx;
16948 +       struct aead_request *req = container_of(areq, struct aead_request,
16949 +                                               base);
16950 +       struct caam_request *req_ctx = to_caam_req(areq);
16951 +       struct aead_edesc *edesc = req_ctx->edesc;
16952 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
16953 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16954 +       int ecode = 0;
16955 +
16956 +#ifdef DEBUG
16957 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16958 +#endif
16959 +
16960 +       if (unlikely(status)) {
16961 +               caam_qi2_strstatus(ctx->dev, status);
16962 +               ecode = -EIO;
16963 +       }
16964 +
16965 +       aead_unmap(ctx->dev, edesc, req);
16966 +       qi_cache_free(edesc);
16967 +       aead_request_complete(req, ecode);
16968 +}
16969 +
16970 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
16971 +{
16972 +       struct crypto_async_request *areq = cbk_ctx;
16973 +       struct aead_request *req = container_of(areq, struct aead_request,
16974 +                                               base);
16975 +       struct caam_request *req_ctx = to_caam_req(areq);
16976 +       struct aead_edesc *edesc = req_ctx->edesc;
16977 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
16978 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
16979 +       int ecode = 0;
16980 +
16981 +#ifdef DEBUG
16982 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16983 +#endif
16984 +
16985 +       if (unlikely(status)) {
16986 +               caam_qi2_strstatus(ctx->dev, status);
16987 +               /*
16988 +                * If the h/w ICV (integrity) check failed, return -EBADMSG.
16989 +                */
16990 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
16991 +                    JRSTA_CCBERR_ERRID_ICVCHK)
16992 +                       ecode = -EBADMSG;
16993 +               else
16994 +                       ecode = -EIO;
16995 +       }
16996 +
16997 +       aead_unmap(ctx->dev, edesc, req);
16998 +       qi_cache_free(edesc);
16999 +       aead_request_complete(req, ecode);
17000 +}
17001 +
17002 +static int aead_encrypt(struct aead_request *req)
17003 +{
17004 +       struct aead_edesc *edesc;
17005 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17006 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17007 +       struct caam_request *caam_req = aead_request_ctx(req);
17008 +       int ret;
17009 +
17010 +       /* allocate extended descriptor */
17011 +       edesc = aead_edesc_alloc(req, true);
17012 +       if (IS_ERR(edesc))
17013 +               return PTR_ERR(edesc);
17014 +
17015 +       caam_req->flc = &ctx->flc[ENCRYPT];
17016 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17017 +       caam_req->op_type = ENCRYPT;
17018 +       caam_req->cbk = aead_encrypt_done;
17019 +       caam_req->ctx = &req->base;
17020 +       caam_req->edesc = edesc;
17021 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17022 +       if (ret != -EINPROGRESS &&
17023 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17024 +               aead_unmap(ctx->dev, edesc, req);
17025 +               qi_cache_free(edesc);
17026 +       }
17027 +
17028 +       return ret;
17029 +}
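+
+/*
+ * Note on the return convention of the request entry points (standard
+ * crypto API behaviour, not specific to this driver): -EINPROGRESS means
+ * the request was queued and will be completed from the done callback;
+ * -EBUSY with CRYPTO_TFM_REQ_MAY_BACKLOG set means it was backlogged but
+ * will still complete asynchronously. Anything else is a synchronous
+ * failure, so the edesc is unmapped and freed on the spot. Caller-side
+ * sketch (illustration only):
+ *
+ *      ret = crypto_aead_encrypt(req);
+ *      if (ret == -EINPROGRESS || ret == -EBUSY)
+ *              wait_for_completion(&done);  // completed from the callback
+ */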
17030 +
17031 +static int aead_decrypt(struct aead_request *req)
17032 +{
17033 +       struct aead_edesc *edesc;
17034 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
17035 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
17036 +       struct caam_request *caam_req = aead_request_ctx(req);
17037 +       int ret;
17038 +
17039 +       /* allocate extended descriptor */
17040 +       edesc = aead_edesc_alloc(req, false);
17041 +       if (IS_ERR(edesc))
17042 +               return PTR_ERR(edesc);
17043 +
17044 +       caam_req->flc = &ctx->flc[DECRYPT];
17045 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17046 +       caam_req->op_type = DECRYPT;
17047 +       caam_req->cbk = aead_decrypt_done;
17048 +       caam_req->ctx = &req->base;
17049 +       caam_req->edesc = edesc;
17050 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17051 +       if (ret != -EINPROGRESS &&
17052 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17053 +               aead_unmap(ctx->dev, edesc, req);
17054 +               qi_cache_free(edesc);
17055 +       }
17056 +
17057 +       return ret;
17058 +}
17059 +
17060 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17061 +{
17062 +       struct crypto_async_request *areq = cbk_ctx;
17063 +       struct aead_request *req = container_of(areq, struct aead_request,
17064 +                                               base);
17065 +       struct caam_request *req_ctx = to_caam_req(areq);
17066 +       struct tls_edesc *edesc = req_ctx->edesc;
17067 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17068 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17069 +       int ecode = 0;
17070 +
17071 +#ifdef DEBUG
17072 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17073 +#endif
17074 +
17075 +       if (unlikely(status)) {
17076 +               caam_qi2_strstatus(ctx->dev, status);
17077 +               ecode = -EIO;
17078 +       }
17079 +
17080 +       tls_unmap(ctx->dev, edesc, req);
17081 +       qi_cache_free(edesc);
17082 +       aead_request_complete(req, ecode);
17083 +}
17084 +
17085 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17086 +{
17087 +       struct crypto_async_request *areq = cbk_ctx;
17088 +       struct aead_request *req = container_of(areq, struct aead_request,
17089 +                                               base);
17090 +       struct caam_request *req_ctx = to_caam_req(areq);
17091 +       struct tls_edesc *edesc = req_ctx->edesc;
17092 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17093 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17094 +       int ecode = 0;
17095 +
17096 +#ifdef DEBUG
17097 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17098 +#endif
17099 +
17100 +       if (unlikely(status)) {
17101 +               caam_qi2_strstatus(ctx->dev, status);
17102 +               /*
17103 +                * If the h/w ICV (integrity) check failed, return -EBADMSG.
17104 +                */
17105 +               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17106 +                    JRSTA_CCBERR_ERRID_ICVCHK)
17107 +                       ecode = -EBADMSG;
17108 +               else
17109 +                       ecode = -EIO;
17110 +       }
17111 +
17112 +       tls_unmap(ctx->dev, edesc, req);
17113 +       qi_cache_free(edesc);
17114 +       aead_request_complete(req, ecode);
17115 +}
17116 +
17117 +static int tls_encrypt(struct aead_request *req)
17118 +{
17119 +       struct tls_edesc *edesc;
17120 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17121 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17122 +       struct caam_request *caam_req = aead_request_ctx(req);
17123 +       int ret;
17124 +
17125 +       /* allocate extended descriptor */
17126 +       edesc = tls_edesc_alloc(req, true);
17127 +       if (IS_ERR(edesc))
17128 +               return PTR_ERR(edesc);
17129 +
17130 +       caam_req->flc = &ctx->flc[ENCRYPT];
17131 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17132 +       caam_req->op_type = ENCRYPT;
17133 +       caam_req->cbk = tls_encrypt_done;
17134 +       caam_req->ctx = &req->base;
17135 +       caam_req->edesc = edesc;
17136 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17137 +       if (ret != -EINPROGRESS &&
17138 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17139 +               tls_unmap(ctx->dev, edesc, req);
17140 +               qi_cache_free(edesc);
17141 +       }
17142 +
17143 +       return ret;
17144 +}
17145 +
17146 +static int tls_decrypt(struct aead_request *req)
17147 +{
17148 +       struct tls_edesc *edesc;
17149 +       struct crypto_aead *tls = crypto_aead_reqtfm(req);
17150 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
17151 +       struct caam_request *caam_req = aead_request_ctx(req);
17152 +       int ret;
17153 +
17154 +       /* allocate extended descriptor */
17155 +       edesc = tls_edesc_alloc(req, false);
17156 +       if (IS_ERR(edesc))
17157 +               return PTR_ERR(edesc);
17158 +
17159 +       caam_req->flc = &ctx->flc[DECRYPT];
17160 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17161 +       caam_req->op_type = DECRYPT;
17162 +       caam_req->cbk = tls_decrypt_done;
17163 +       caam_req->ctx = &req->base;
17164 +       caam_req->edesc = edesc;
17165 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17166 +       if (ret != -EINPROGRESS &&
17167 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17168 +               tls_unmap(ctx->dev, edesc, req);
17169 +               qi_cache_free(edesc);
17170 +       }
17171 +
17172 +       return ret;
17173 +}
17174 +
17175 +static int ipsec_gcm_encrypt(struct aead_request *req)
17176 +{
17177 +       if (req->assoclen < 8)
17178 +               return -EINVAL;
17179 +
17180 +       return aead_encrypt(req);
17181 +}
17182 +
17183 +static int ipsec_gcm_decrypt(struct aead_request *req)
17184 +{
17185 +       if (req->assoclen < 8)
17186 +               return -EINVAL;
17187 +
17188 +       return aead_decrypt(req);
17189 +}
17190 +
17191 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17192 +{
17193 +       struct crypto_async_request *areq = cbk_ctx;
17194 +       struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17195 +       struct caam_request *req_ctx = to_caam_req(areq);
17196 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17197 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17198 +       struct ablkcipher_edesc *edesc = req_ctx->edesc;
17199 +       int ecode = 0;
17200 +       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17201 +
17202 +#ifdef DEBUG
17203 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17204 +#endif
17205 +
17206 +       if (unlikely(status)) {
17207 +               caam_qi2_strstatus(ctx->dev, status);
17208 +               ecode = -EIO;
17209 +       }
17210 +
17211 +#ifdef DEBUG
17212 +       print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
17213 +                      DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17214 +                      edesc->src_nents > 1 ? 100 : ivsize, 1);
17215 +       caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
17216 +                    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17217 +                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17218 +#endif
17219 +
17220 +       ablkcipher_unmap(ctx->dev, edesc, req);
17221 +       qi_cache_free(edesc);
17222 +
17223 +       /*
17224 +        * The crypto API expects us to set the IV (req->info) to the last
17225 +        * ciphertext block. This is used e.g. by the CTS mode.
17226 +        */
17227 +       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17228 +                                ivsize, 0);
17229 +
17230 +       ablkcipher_request_complete(req, ecode);
17231 +}
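+
+/*
+ * Illustration (not driver code): with req->info updated to the last
+ * ciphertext block, a caller can chain CBC requests back to back
+ * (assuming synchronous completion for brevity):
+ *
+ *      ablkcipher_request_set_crypt(req, src1, dst1, len1, iv);
+ *      crypto_ablkcipher_encrypt(req);  // iv := last ciphertext block
+ *      ablkcipher_request_set_crypt(req, src2, dst2, len2, iv);
+ *      crypto_ablkcipher_encrypt(req);  // continues the CBC chain
+ */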
17232 +
17233 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17234 +{
17235 +       struct ablkcipher_edesc *edesc;
17236 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17237 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17238 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17239 +       int ret;
17240 +
17241 +       /* allocate extended descriptor */
17242 +       edesc = ablkcipher_edesc_alloc(req, true);
17243 +       if (IS_ERR(edesc))
17244 +               return PTR_ERR(edesc);
17245 +
17246 +       caam_req->flc = &ctx->flc[ENCRYPT];
17247 +       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17248 +       caam_req->op_type = ENCRYPT;
17249 +       caam_req->cbk = ablkcipher_done;
17250 +       caam_req->ctx = &req->base;
17251 +       caam_req->edesc = edesc;
17252 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17253 +       if (ret != -EINPROGRESS &&
17254 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17255 +               ablkcipher_unmap(ctx->dev, edesc, req);
17256 +               qi_cache_free(edesc);
17257 +       }
17258 +
17259 +       return ret;
17260 +}
17261 +
17262 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17263 +{
17264 +       struct ablkcipher_request *req = &greq->creq;
17265 +       struct ablkcipher_edesc *edesc;
17266 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17267 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17268 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17269 +       int ret;
17270 +
17271 +       /* allocate extended descriptor */
17272 +       edesc = ablkcipher_giv_edesc_alloc(greq);
17273 +       if (IS_ERR(edesc))
17274 +               return PTR_ERR(edesc);
17275 +
17276 +       caam_req->flc = &ctx->flc[GIVENCRYPT];
17277 +       caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
17278 +       caam_req->op_type = GIVENCRYPT;
17279 +       caam_req->cbk = ablkcipher_done;
17280 +       caam_req->ctx = &req->base;
17281 +       caam_req->edesc = edesc;
17282 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17283 +       if (ret != -EINPROGRESS &&
17284 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17285 +               ablkcipher_unmap(ctx->dev, edesc, req);
17286 +               qi_cache_free(edesc);
17287 +       }
17288 +
17289 +       return ret;
17290 +}
17291 +
17292 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17293 +{
17294 +       struct ablkcipher_edesc *edesc;
17295 +       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17296 +       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17297 +       struct caam_request *caam_req = ablkcipher_request_ctx(req);
17298 +       int ret;
17299 +
17300 +       /* allocate extended descriptor */
17301 +       edesc = ablkcipher_edesc_alloc(req, false);
17302 +       if (IS_ERR(edesc))
17303 +               return PTR_ERR(edesc);
17304 +
17305 +       caam_req->flc = &ctx->flc[DECRYPT];
17306 +       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17307 +       caam_req->op_type = DECRYPT;
17308 +       caam_req->cbk = ablkcipher_done;
17309 +       caam_req->ctx = &req->base;
17310 +       caam_req->edesc = edesc;
17311 +       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17312 +       if (ret != -EINPROGRESS &&
17313 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17314 +               ablkcipher_unmap(ctx->dev, edesc, req);
17315 +               qi_cache_free(edesc);
17316 +       }
17317 +
17318 +       return ret;
17319 +}
17320 +
17321 +struct caam_crypto_alg {
17322 +       struct list_head entry;
17323 +       struct crypto_alg crypto_alg;
17324 +       struct caam_alg_entry caam;
17325 +};
17326 +
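+/*
+ * caam_cra_init() - common transform init: copy the CAAM class 1/2
+ * algorithm selectors from the template and DMA-map the per-context
+ * flow contexts together with the key area that follows them.
+ */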
17327 +static int caam_cra_init(struct crypto_tfm *tfm)
17328 +{
17329 +       struct crypto_alg *alg = tfm->__crt_alg;
17330 +       struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17331 +                                                       crypto_alg);
17332 +       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17333 +       dma_addr_t dma_addr;
17334 +       int i;
17335 +
17336 +       /* copy descriptor header template value */
17337 +       ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17338 +                            caam_alg->caam.class1_alg_type;
17339 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17340 +                            caam_alg->caam.class2_alg_type;
17341 +
17342 +       ctx->dev = caam_alg->caam.dev;
17343 +
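+       /*
+        * Map the flow contexts and the key storage behind them as one
+        * bidirectional region; the per-operation DMA addresses and
+        * key_dma are derived from the base address below.
+        */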
17344 +       dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
17345 +                                       offsetof(struct caam_ctx, flc_dma),
17346 +                                       DMA_BIDIRECTIONAL,
17347 +                                       DMA_ATTR_SKIP_CPU_SYNC);
17348 +       if (dma_mapping_error(ctx->dev, dma_addr)) {
17349 +               dev_err(ctx->dev, "unable to map key, shared descriptors\n");
17350 +               return -ENOMEM;
17351 +       }
17352 +
17353 +       for (i = 0; i < NUM_OP; i++)
17354 +               ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
17355 +       ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
17356 +
17357 +       return 0;
17358 +}
17359 +
17360 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17361 +{
17362 +       struct ablkcipher_tfm *ablkcipher_tfm =
17363 +               crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17364 +
17365 +       ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17366 +       return caam_cra_init(tfm);
17367 +}
17368 +
17369 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17370 +{
17371 +       crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17372 +       return caam_cra_init(crypto_aead_tfm(tfm));
17373 +}
17374 +
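+/* Undo the single mapping created in caam_cra_init(). */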
17375 +static void caam_exit_common(struct caam_ctx *ctx)
17376 +{
17377 +       dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
17378 +                              offsetof(struct caam_ctx, flc_dma),
17379 +                              DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
17380 +}
17381 +
17382 +static void caam_cra_exit(struct crypto_tfm *tfm)
17383 +{
17384 +       caam_exit_common(crypto_tfm_ctx(tfm));
17385 +}
17386 +
17387 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17388 +{
17389 +       caam_exit_common(crypto_aead_ctx(tfm));
17390 +}
17391 +
17392 +#define template_ablkcipher    template_u.ablkcipher
17393 +struct caam_alg_template {
17394 +       char name[CRYPTO_MAX_ALG_NAME];
17395 +       char driver_name[CRYPTO_MAX_ALG_NAME];
17396 +       unsigned int blocksize;
17397 +       u32 type;
17398 +       union {
17399 +               struct ablkcipher_alg ablkcipher;
17400 +       } template_u;
17401 +       u32 class1_alg_type;
17402 +       u32 class2_alg_type;
17403 +};
17404 +
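+/* (Giv)cipher templates; .type distinguishes ABLKCIPHER from GIVCIPHER entries. */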
17405 +static struct caam_alg_template driver_algs[] = {
17406 +       /* ablkcipher descriptor */
17407 +       {
17408 +               .name = "cbc(aes)",
17409 +               .driver_name = "cbc-aes-caam-qi2",
17410 +               .blocksize = AES_BLOCK_SIZE,
17411 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17412 +               .template_ablkcipher = {
17413 +                       .setkey = ablkcipher_setkey,
17414 +                       .encrypt = ablkcipher_encrypt,
17415 +                       .decrypt = ablkcipher_decrypt,
17416 +                       .givencrypt = ablkcipher_givencrypt,
17417 +                       .geniv = "<built-in>",
17418 +                       .min_keysize = AES_MIN_KEY_SIZE,
17419 +                       .max_keysize = AES_MAX_KEY_SIZE,
17420 +                       .ivsize = AES_BLOCK_SIZE,
17421 +               },
17422 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17423 +       },
17424 +       {
17425 +               .name = "cbc(des3_ede)",
17426 +               .driver_name = "cbc-3des-caam-qi2",
17427 +               .blocksize = DES3_EDE_BLOCK_SIZE,
17428 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17429 +               .template_ablkcipher = {
17430 +                       .setkey = ablkcipher_setkey,
17431 +                       .encrypt = ablkcipher_encrypt,
17432 +                       .decrypt = ablkcipher_decrypt,
17433 +                       .givencrypt = ablkcipher_givencrypt,
17434 +                       .geniv = "<built-in>",
17435 +                       .min_keysize = DES3_EDE_KEY_SIZE,
17436 +                       .max_keysize = DES3_EDE_KEY_SIZE,
17437 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17438 +               },
17439 +               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17440 +       },
17441 +       {
17442 +               .name = "cbc(des)",
17443 +               .driver_name = "cbc-des-caam-qi2",
17444 +               .blocksize = DES_BLOCK_SIZE,
17445 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17446 +               .template_ablkcipher = {
17447 +                       .setkey = ablkcipher_setkey,
17448 +                       .encrypt = ablkcipher_encrypt,
17449 +                       .decrypt = ablkcipher_decrypt,
17450 +                       .givencrypt = ablkcipher_givencrypt,
17451 +                       .geniv = "<built-in>",
17452 +                       .min_keysize = DES_KEY_SIZE,
17453 +                       .max_keysize = DES_KEY_SIZE,
17454 +                       .ivsize = DES_BLOCK_SIZE,
17455 +               },
17456 +               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17457 +       },
17458 +       {
17459 +               .name = "ctr(aes)",
17460 +               .driver_name = "ctr-aes-caam-qi2",
17461 +               .blocksize = 1,
17462 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17463 +               .template_ablkcipher = {
17464 +                       .setkey = ablkcipher_setkey,
17465 +                       .encrypt = ablkcipher_encrypt,
17466 +                       .decrypt = ablkcipher_decrypt,
17467 +                       .geniv = "chainiv",
17468 +                       .min_keysize = AES_MIN_KEY_SIZE,
17469 +                       .max_keysize = AES_MAX_KEY_SIZE,
17470 +                       .ivsize = AES_BLOCK_SIZE,
17471 +               },
17472 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17473 +       },
17474 +       {
17475 +               .name = "rfc3686(ctr(aes))",
17476 +               .driver_name = "rfc3686-ctr-aes-caam-qi2",
17477 +               .blocksize = 1,
17478 +               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17479 +               .template_ablkcipher = {
17480 +                       .setkey = ablkcipher_setkey,
17481 +                       .encrypt = ablkcipher_encrypt,
17482 +                       .decrypt = ablkcipher_decrypt,
17483 +                       .givencrypt = ablkcipher_givencrypt,
17484 +                       .geniv = "<built-in>",
17485 +                       .min_keysize = AES_MIN_KEY_SIZE +
17486 +                                      CTR_RFC3686_NONCE_SIZE,
17487 +                       .max_keysize = AES_MAX_KEY_SIZE +
17488 +                                      CTR_RFC3686_NONCE_SIZE,
17489 +                       .ivsize = CTR_RFC3686_IV_SIZE,
17490 +               },
17491 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17492 +       },
17493 +       {
17494 +               .name = "xts(aes)",
17495 +               .driver_name = "xts-aes-caam-qi2",
17496 +               .blocksize = AES_BLOCK_SIZE,
17497 +               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17498 +               .template_ablkcipher = {
17499 +                       .setkey = xts_ablkcipher_setkey,
17500 +                       .encrypt = ablkcipher_encrypt,
17501 +                       .decrypt = ablkcipher_decrypt,
17502 +                       .geniv = "eseqiv",
17503 +                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
17504 +                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
17505 +                       .ivsize = AES_BLOCK_SIZE,
17506 +               },
17507 +               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17508 +       }
17509 +};
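+
+/*
+ * Consumers reach the templates above through the generic crypto API by
+ * algorithm name. The snippet below is only an illustrative sketch of a
+ * 4.9-era ablkcipher user (error handling omitted; key, iv, sg and
+ * nbytes are assumed to be set up by the caller):
+ *
+ *     struct crypto_ablkcipher *tfm =
+ *             crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *     struct ablkcipher_request *req =
+ *             ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *
+ *     crypto_ablkcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
+ *     ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);
+ *     crypto_ablkcipher_encrypt(req);
+ */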
17510 +
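+/*
+ * AEAD algorithms: GCM variants plus authenc() combinations (optionally
+ * wrapped in echainiv()/seqiv()); each entry pairs the crypto API
+ * definition with the CAAM class 1/class 2 algorithm selectors.
+ */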
17511 +static struct caam_aead_alg driver_aeads[] = {
17512 +       {
17513 +               .aead = {
17514 +                       .base = {
17515 +                               .cra_name = "rfc4106(gcm(aes))",
17516 +                               .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17517 +                               .cra_blocksize = 1,
17518 +                       },
17519 +                       .setkey = rfc4106_setkey,
17520 +                       .setauthsize = rfc4106_setauthsize,
17521 +                       .encrypt = ipsec_gcm_encrypt,
17522 +                       .decrypt = ipsec_gcm_decrypt,
17523 +                       .ivsize = 8,
17524 +                       .maxauthsize = AES_BLOCK_SIZE,
17525 +               },
17526 +               .caam = {
17527 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17528 +               },
17529 +       },
17530 +       {
17531 +               .aead = {
17532 +                       .base = {
17533 +                               .cra_name = "rfc4543(gcm(aes))",
17534 +                               .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17535 +                               .cra_blocksize = 1,
17536 +                       },
17537 +                       .setkey = rfc4543_setkey,
17538 +                       .setauthsize = rfc4543_setauthsize,
17539 +                       .encrypt = ipsec_gcm_encrypt,
17540 +                       .decrypt = ipsec_gcm_decrypt,
17541 +                       .ivsize = 8,
17542 +                       .maxauthsize = AES_BLOCK_SIZE,
17543 +               },
17544 +               .caam = {
17545 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17546 +               },
17547 +       },
17548 +       /* Galois Counter Mode */
17549 +       {
17550 +               .aead = {
17551 +                       .base = {
17552 +                               .cra_name = "gcm(aes)",
17553 +                               .cra_driver_name = "gcm-aes-caam-qi2",
17554 +                               .cra_blocksize = 1,
17555 +                       },
17556 +                       .setkey = gcm_setkey,
17557 +                       .setauthsize = gcm_setauthsize,
17558 +                       .encrypt = aead_encrypt,
17559 +                       .decrypt = aead_decrypt,
17560 +                       .ivsize = 12,
17561 +                       .maxauthsize = AES_BLOCK_SIZE,
17562 +               },
17563 +               .caam = {
17564 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17565 +               }
17566 +       },
17567 +       /* single-pass ipsec_esp descriptor */
17568 +       {
17569 +               .aead = {
17570 +                       .base = {
17571 +                               .cra_name = "authenc(hmac(md5),cbc(aes))",
17572 +                               .cra_driver_name = "authenc-hmac-md5-"
17573 +                                                  "cbc-aes-caam-qi2",
17574 +                               .cra_blocksize = AES_BLOCK_SIZE,
17575 +                       },
17576 +                       .setkey = aead_setkey,
17577 +                       .setauthsize = aead_setauthsize,
17578 +                       .encrypt = aead_encrypt,
17579 +                       .decrypt = aead_decrypt,
17580 +                       .ivsize = AES_BLOCK_SIZE,
17581 +                       .maxauthsize = MD5_DIGEST_SIZE,
17582 +               },
17583 +               .caam = {
17584 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17585 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17586 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17587 +               }
17588 +       },
17589 +       {
17590 +               .aead = {
17591 +                       .base = {
17592 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17593 +                                           "cbc(aes)))",
17594 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17595 +                                                  "cbc-aes-caam-qi2",
17596 +                               .cra_blocksize = AES_BLOCK_SIZE,
17597 +                       },
17598 +                       .setkey = aead_setkey,
17599 +                       .setauthsize = aead_setauthsize,
17600 +                       .encrypt = aead_encrypt,
17601 +                       .decrypt = aead_decrypt,
17602 +                       .ivsize = AES_BLOCK_SIZE,
17603 +                       .maxauthsize = MD5_DIGEST_SIZE,
17604 +               },
17605 +               .caam = {
17606 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17607 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17608 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17609 +                       .geniv = true,
17610 +               }
17611 +       },
17612 +       {
17613 +               .aead = {
17614 +                       .base = {
17615 +                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
17616 +                               .cra_driver_name = "authenc-hmac-sha1-"
17617 +                                                  "cbc-aes-caam-qi2",
17618 +                               .cra_blocksize = AES_BLOCK_SIZE,
17619 +                       },
17620 +                       .setkey = aead_setkey,
17621 +                       .setauthsize = aead_setauthsize,
17622 +                       .encrypt = aead_encrypt,
17623 +                       .decrypt = aead_decrypt,
17624 +                       .ivsize = AES_BLOCK_SIZE,
17625 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17626 +               },
17627 +               .caam = {
17628 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17629 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17630 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17631 +               }
17632 +       },
17633 +       {
17634 +               .aead = {
17635 +                       .base = {
17636 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
17637 +                                           "cbc(aes)))",
17638 +                               .cra_driver_name = "echainiv-authenc-"
17639 +                                                  "hmac-sha1-cbc-aes-caam-qi2",
17640 +                               .cra_blocksize = AES_BLOCK_SIZE,
17641 +                       },
17642 +                       .setkey = aead_setkey,
17643 +                       .setauthsize = aead_setauthsize,
17644 +                       .encrypt = aead_encrypt,
17645 +                       .decrypt = aead_decrypt,
17646 +                       .ivsize = AES_BLOCK_SIZE,
17647 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17648 +               },
17649 +               .caam = {
17650 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17651 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17652 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17653 +                       .geniv = true,
17654 +               },
17655 +       },
17656 +       {
17657 +               .aead = {
17658 +                       .base = {
17659 +                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
17660 +                               .cra_driver_name = "authenc-hmac-sha224-"
17661 +                                                  "cbc-aes-caam-qi2",
17662 +                               .cra_blocksize = AES_BLOCK_SIZE,
17663 +                       },
17664 +                       .setkey = aead_setkey,
17665 +                       .setauthsize = aead_setauthsize,
17666 +                       .encrypt = aead_encrypt,
17667 +                       .decrypt = aead_decrypt,
17668 +                       .ivsize = AES_BLOCK_SIZE,
17669 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17670 +               },
17671 +               .caam = {
17672 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17673 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17674 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17675 +               }
17676 +       },
17677 +       {
17678 +               .aead = {
17679 +                       .base = {
17680 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
17681 +                                           "cbc(aes)))",
17682 +                               .cra_driver_name = "echainiv-authenc-"
17683 +                                                  "hmac-sha224-cbc-aes-caam-qi2",
17684 +                               .cra_blocksize = AES_BLOCK_SIZE,
17685 +                       },
17686 +                       .setkey = aead_setkey,
17687 +                       .setauthsize = aead_setauthsize,
17688 +                       .encrypt = aead_encrypt,
17689 +                       .decrypt = aead_decrypt,
17690 +                       .ivsize = AES_BLOCK_SIZE,
17691 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17692 +               },
17693 +               .caam = {
17694 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17695 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17696 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17697 +                       .geniv = true,
17698 +               }
17699 +       },
17700 +       {
17701 +               .aead = {
17702 +                       .base = {
17703 +                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
17704 +                               .cra_driver_name = "authenc-hmac-sha256-"
17705 +                                                  "cbc-aes-caam-qi2",
17706 +                               .cra_blocksize = AES_BLOCK_SIZE,
17707 +                       },
17708 +                       .setkey = aead_setkey,
17709 +                       .setauthsize = aead_setauthsize,
17710 +                       .encrypt = aead_encrypt,
17711 +                       .decrypt = aead_decrypt,
17712 +                       .ivsize = AES_BLOCK_SIZE,
17713 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17714 +               },
17715 +               .caam = {
17716 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17717 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17718 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17719 +               }
17720 +       },
17721 +       {
17722 +               .aead = {
17723 +                       .base = {
17724 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
17725 +                                           "cbc(aes)))",
17726 +                               .cra_driver_name = "echainiv-authenc-"
17727 +                                                  "hmac-sha256-cbc-aes-"
17728 +                                                  "caam-qi2",
17729 +                               .cra_blocksize = AES_BLOCK_SIZE,
17730 +                       },
17731 +                       .setkey = aead_setkey,
17732 +                       .setauthsize = aead_setauthsize,
17733 +                       .encrypt = aead_encrypt,
17734 +                       .decrypt = aead_decrypt,
17735 +                       .ivsize = AES_BLOCK_SIZE,
17736 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17737 +               },
17738 +               .caam = {
17739 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17740 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17741 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17742 +                       .geniv = true,
17743 +               }
17744 +       },
17745 +       {
17746 +               .aead = {
17747 +                       .base = {
17748 +                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
17749 +                               .cra_driver_name = "authenc-hmac-sha384-"
17750 +                                                  "cbc-aes-caam-qi2",
17751 +                               .cra_blocksize = AES_BLOCK_SIZE,
17752 +                       },
17753 +                       .setkey = aead_setkey,
17754 +                       .setauthsize = aead_setauthsize,
17755 +                       .encrypt = aead_encrypt,
17756 +                       .decrypt = aead_decrypt,
17757 +                       .ivsize = AES_BLOCK_SIZE,
17758 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17759 +               },
17760 +               .caam = {
17761 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17762 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17763 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17764 +               }
17765 +       },
17766 +       {
17767 +               .aead = {
17768 +                       .base = {
17769 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
17770 +                                           "cbc(aes)))",
17771 +                               .cra_driver_name = "echainiv-authenc-"
17772 +                                                  "hmac-sha384-cbc-aes-"
17773 +                                                  "caam-qi2",
17774 +                               .cra_blocksize = AES_BLOCK_SIZE,
17775 +                       },
17776 +                       .setkey = aead_setkey,
17777 +                       .setauthsize = aead_setauthsize,
17778 +                       .encrypt = aead_encrypt,
17779 +                       .decrypt = aead_decrypt,
17780 +                       .ivsize = AES_BLOCK_SIZE,
17781 +                       .maxauthsize = SHA384_DIGEST_SIZE,
17782 +               },
17783 +               .caam = {
17784 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17785 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17786 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17787 +                       .geniv = true,
17788 +               }
17789 +       },
17790 +       {
17791 +               .aead = {
17792 +                       .base = {
17793 +                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
17794 +                               .cra_driver_name = "authenc-hmac-sha512-"
17795 +                                                  "cbc-aes-caam-qi2",
17796 +                               .cra_blocksize = AES_BLOCK_SIZE,
17797 +                       },
17798 +                       .setkey = aead_setkey,
17799 +                       .setauthsize = aead_setauthsize,
17800 +                       .encrypt = aead_encrypt,
17801 +                       .decrypt = aead_decrypt,
17802 +                       .ivsize = AES_BLOCK_SIZE,
17803 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17804 +               },
17805 +               .caam = {
17806 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17807 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17808 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17809 +               }
17810 +       },
17811 +       {
17812 +               .aead = {
17813 +                       .base = {
17814 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
17815 +                                           "cbc(aes)))",
17816 +                               .cra_driver_name = "echainiv-authenc-"
17817 +                                                  "hmac-sha512-cbc-aes-"
17818 +                                                  "caam-qi2",
17819 +                               .cra_blocksize = AES_BLOCK_SIZE,
17820 +                       },
17821 +                       .setkey = aead_setkey,
17822 +                       .setauthsize = aead_setauthsize,
17823 +                       .encrypt = aead_encrypt,
17824 +                       .decrypt = aead_decrypt,
17825 +                       .ivsize = AES_BLOCK_SIZE,
17826 +                       .maxauthsize = SHA512_DIGEST_SIZE,
17827 +               },
17828 +               .caam = {
17829 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17830 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17831 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17832 +                       .geniv = true,
17833 +               }
17834 +       },
17835 +       {
17836 +               .aead = {
17837 +                       .base = {
17838 +                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17839 +                               .cra_driver_name = "authenc-hmac-md5-"
17840 +                                                  "cbc-des3_ede-caam-qi2",
17841 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17842 +                       },
17843 +                       .setkey = aead_setkey,
17844 +                       .setauthsize = aead_setauthsize,
17845 +                       .encrypt = aead_encrypt,
17846 +                       .decrypt = aead_decrypt,
17847 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17848 +                       .maxauthsize = MD5_DIGEST_SIZE,
17849 +               },
17850 +               .caam = {
17851 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17852 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17853 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17854 +               }
17855 +       },
17856 +       {
17857 +               .aead = {
17858 +                       .base = {
17859 +                               .cra_name = "echainiv(authenc(hmac(md5),"
17860 +                                           "cbc(des3_ede)))",
17861 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
17862 +                                                  "cbc-des3_ede-caam-qi2",
17863 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17864 +                       },
17865 +                       .setkey = aead_setkey,
17866 +                       .setauthsize = aead_setauthsize,
17867 +                       .encrypt = aead_encrypt,
17868 +                       .decrypt = aead_decrypt,
17869 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17870 +                       .maxauthsize = MD5_DIGEST_SIZE,
17871 +               },
17872 +               .caam = {
17873 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17874 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17875 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17876 +                       .geniv = true,
17877 +               }
17878 +       },
17879 +       {
17880 +               .aead = {
17881 +                       .base = {
17882 +                               .cra_name = "authenc(hmac(sha1),"
17883 +                                           "cbc(des3_ede))",
17884 +                               .cra_driver_name = "authenc-hmac-sha1-"
17885 +                                                  "cbc-des3_ede-caam-qi2",
17886 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17887 +                       },
17888 +                       .setkey = aead_setkey,
17889 +                       .setauthsize = aead_setauthsize,
17890 +                       .encrypt = aead_encrypt,
17891 +                       .decrypt = aead_decrypt,
17892 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17893 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17894 +               },
17895 +               .caam = {
17896 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17897 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17898 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17899 +               },
17900 +       },
17901 +       {
17902 +               .aead = {
17903 +                       .base = {
17904 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
17905 +                                           "cbc(des3_ede)))",
17906 +                               .cra_driver_name = "echainiv-authenc-"
17907 +                                                  "hmac-sha1-"
17908 +                                                  "cbc-des3_ede-caam-qi2",
17909 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17910 +                       },
17911 +                       .setkey = aead_setkey,
17912 +                       .setauthsize = aead_setauthsize,
17913 +                       .encrypt = aead_encrypt,
17914 +                       .decrypt = aead_decrypt,
17915 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17916 +                       .maxauthsize = SHA1_DIGEST_SIZE,
17917 +               },
17918 +               .caam = {
17919 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17920 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17921 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17922 +                       .geniv = true,
17923 +               }
17924 +       },
17925 +       {
17926 +               .aead = {
17927 +                       .base = {
17928 +                               .cra_name = "authenc(hmac(sha224),"
17929 +                                           "cbc(des3_ede))",
17930 +                               .cra_driver_name = "authenc-hmac-sha224-"
17931 +                                                  "cbc-des3_ede-caam-qi2",
17932 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17933 +                       },
17934 +                       .setkey = aead_setkey,
17935 +                       .setauthsize = aead_setauthsize,
17936 +                       .encrypt = aead_encrypt,
17937 +                       .decrypt = aead_decrypt,
17938 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17939 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17940 +               },
17941 +               .caam = {
17942 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17943 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17944 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17945 +               },
17946 +       },
17947 +       {
17948 +               .aead = {
17949 +                       .base = {
17950 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
17951 +                                           "cbc(des3_ede)))",
17952 +                               .cra_driver_name = "echainiv-authenc-"
17953 +                                                  "hmac-sha224-"
17954 +                                                  "cbc-des3_ede-caam-qi2",
17955 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17956 +                       },
17957 +                       .setkey = aead_setkey,
17958 +                       .setauthsize = aead_setauthsize,
17959 +                       .encrypt = aead_encrypt,
17960 +                       .decrypt = aead_decrypt,
17961 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17962 +                       .maxauthsize = SHA224_DIGEST_SIZE,
17963 +               },
17964 +               .caam = {
17965 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17966 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17967 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17968 +                       .geniv = true,
17969 +               }
17970 +       },
17971 +       {
17972 +               .aead = {
17973 +                       .base = {
17974 +                               .cra_name = "authenc(hmac(sha256),"
17975 +                                           "cbc(des3_ede))",
17976 +                               .cra_driver_name = "authenc-hmac-sha256-"
17977 +                                                  "cbc-des3_ede-caam-qi2",
17978 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17979 +                       },
17980 +                       .setkey = aead_setkey,
17981 +                       .setauthsize = aead_setauthsize,
17982 +                       .encrypt = aead_encrypt,
17983 +                       .decrypt = aead_decrypt,
17984 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
17985 +                       .maxauthsize = SHA256_DIGEST_SIZE,
17986 +               },
17987 +               .caam = {
17988 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17989 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17990 +                                          OP_ALG_AAI_HMAC_PRECOMP,
17991 +               },
17992 +       },
17993 +       {
17994 +               .aead = {
17995 +                       .base = {
17996 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
17997 +                                           "cbc(des3_ede)))",
17998 +                               .cra_driver_name = "echainiv-authenc-"
17999 +                                                  "hmac-sha256-"
18000 +                                                  "cbc-des3_ede-caam-qi2",
18001 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18002 +                       },
18003 +                       .setkey = aead_setkey,
18004 +                       .setauthsize = aead_setauthsize,
18005 +                       .encrypt = aead_encrypt,
18006 +                       .decrypt = aead_decrypt,
18007 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18008 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18009 +               },
18010 +               .caam = {
18011 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18012 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18013 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18014 +                       .geniv = true,
18015 +               }
18016 +       },
18017 +       {
18018 +               .aead = {
18019 +                       .base = {
18020 +                               .cra_name = "authenc(hmac(sha384),"
18021 +                                           "cbc(des3_ede))",
18022 +                               .cra_driver_name = "authenc-hmac-sha384-"
18023 +                                                  "cbc-des3_ede-caam-qi2",
18024 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18025 +                       },
18026 +                       .setkey = aead_setkey,
18027 +                       .setauthsize = aead_setauthsize,
18028 +                       .encrypt = aead_encrypt,
18029 +                       .decrypt = aead_decrypt,
18030 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18031 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18032 +               },
18033 +               .caam = {
18034 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18035 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18036 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18037 +               },
18038 +       },
18039 +       {
18040 +               .aead = {
18041 +                       .base = {
18042 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18043 +                                           "cbc(des3_ede)))",
18044 +                               .cra_driver_name = "echainiv-authenc-"
18045 +                                                  "hmac-sha384-"
18046 +                                                  "cbc-des3_ede-caam-qi2",
18047 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18048 +                       },
18049 +                       .setkey = aead_setkey,
18050 +                       .setauthsize = aead_setauthsize,
18051 +                       .encrypt = aead_encrypt,
18052 +                       .decrypt = aead_decrypt,
18053 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18054 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18055 +               },
18056 +               .caam = {
18057 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18058 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18059 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18060 +                       .geniv = true,
18061 +               }
18062 +       },
18063 +       {
18064 +               .aead = {
18065 +                       .base = {
18066 +                               .cra_name = "authenc(hmac(sha512),"
18067 +                                           "cbc(des3_ede))",
18068 +                               .cra_driver_name = "authenc-hmac-sha512-"
18069 +                                                  "cbc-des3_ede-caam-qi2",
18070 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18071 +                       },
18072 +                       .setkey = aead_setkey,
18073 +                       .setauthsize = aead_setauthsize,
18074 +                       .encrypt = aead_encrypt,
18075 +                       .decrypt = aead_decrypt,
18076 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18077 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18078 +               },
18079 +               .caam = {
18080 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18081 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18082 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18083 +               },
18084 +       },
18085 +       {
18086 +               .aead = {
18087 +                       .base = {
18088 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18089 +                                           "cbc(des3_ede)))",
18090 +                               .cra_driver_name = "echainiv-authenc-"
18091 +                                                  "hmac-sha512-"
18092 +                                                  "cbc-des3_ede-caam-qi2",
18093 +                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18094 +                       },
18095 +                       .setkey = aead_setkey,
18096 +                       .setauthsize = aead_setauthsize,
18097 +                       .encrypt = aead_encrypt,
18098 +                       .decrypt = aead_decrypt,
18099 +                       .ivsize = DES3_EDE_BLOCK_SIZE,
18100 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18101 +               },
18102 +               .caam = {
18103 +                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18104 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18105 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18106 +                       .geniv = true,
18107 +               }
18108 +       },
18109 +       {
18110 +               .aead = {
18111 +                       .base = {
18112 +                               .cra_name = "authenc(hmac(md5),cbc(des))",
18113 +                               .cra_driver_name = "authenc-hmac-md5-"
18114 +                                                  "cbc-des-caam-qi2",
18115 +                               .cra_blocksize = DES_BLOCK_SIZE,
18116 +                       },
18117 +                       .setkey = aead_setkey,
18118 +                       .setauthsize = aead_setauthsize,
18119 +                       .encrypt = aead_encrypt,
18120 +                       .decrypt = aead_decrypt,
18121 +                       .ivsize = DES_BLOCK_SIZE,
18122 +                       .maxauthsize = MD5_DIGEST_SIZE,
18123 +               },
18124 +               .caam = {
18125 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18126 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18127 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18128 +               },
18129 +       },
18130 +       {
18131 +               .aead = {
18132 +                       .base = {
18133 +                               .cra_name = "echainiv(authenc(hmac(md5),"
18134 +                                           "cbc(des)))",
18135 +                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
18136 +                                                  "cbc-des-caam-qi2",
18137 +                               .cra_blocksize = DES_BLOCK_SIZE,
18138 +                       },
18139 +                       .setkey = aead_setkey,
18140 +                       .setauthsize = aead_setauthsize,
18141 +                       .encrypt = aead_encrypt,
18142 +                       .decrypt = aead_decrypt,
18143 +                       .ivsize = DES_BLOCK_SIZE,
18144 +                       .maxauthsize = MD5_DIGEST_SIZE,
18145 +               },
18146 +               .caam = {
18147 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18148 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18149 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18150 +                       .geniv = true,
18151 +               }
18152 +       },
18153 +       {
18154 +               .aead = {
18155 +                       .base = {
18156 +                               .cra_name = "authenc(hmac(sha1),cbc(des))",
18157 +                               .cra_driver_name = "authenc-hmac-sha1-"
18158 +                                                  "cbc-des-caam-qi2",
18159 +                               .cra_blocksize = DES_BLOCK_SIZE,
18160 +                       },
18161 +                       .setkey = aead_setkey,
18162 +                       .setauthsize = aead_setauthsize,
18163 +                       .encrypt = aead_encrypt,
18164 +                       .decrypt = aead_decrypt,
18165 +                       .ivsize = DES_BLOCK_SIZE,
18166 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18167 +               },
18168 +               .caam = {
18169 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18170 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18171 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18172 +               },
18173 +       },
18174 +       {
18175 +               .aead = {
18176 +                       .base = {
18177 +                               .cra_name = "echainiv(authenc(hmac(sha1),"
18178 +                                           "cbc(des)))",
18179 +                               .cra_driver_name = "echainiv-authenc-"
18180 +                                                  "hmac-sha1-cbc-des-caam-qi2",
18181 +                               .cra_blocksize = DES_BLOCK_SIZE,
18182 +                       },
18183 +                       .setkey = aead_setkey,
18184 +                       .setauthsize = aead_setauthsize,
18185 +                       .encrypt = aead_encrypt,
18186 +                       .decrypt = aead_decrypt,
18187 +                       .ivsize = DES_BLOCK_SIZE,
18188 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18189 +               },
18190 +               .caam = {
18191 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18192 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18193 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18194 +                       .geniv = true,
18195 +               }
18196 +       },
18197 +       {
18198 +               .aead = {
18199 +                       .base = {
18200 +                               .cra_name = "authenc(hmac(sha224),cbc(des))",
18201 +                               .cra_driver_name = "authenc-hmac-sha224-"
18202 +                                                  "cbc-des-caam-qi2",
18203 +                               .cra_blocksize = DES_BLOCK_SIZE,
18204 +                       },
18205 +                       .setkey = aead_setkey,
18206 +                       .setauthsize = aead_setauthsize,
18207 +                       .encrypt = aead_encrypt,
18208 +                       .decrypt = aead_decrypt,
18209 +                       .ivsize = DES_BLOCK_SIZE,
18210 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18211 +               },
18212 +               .caam = {
18213 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18214 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18215 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18216 +               },
18217 +       },
18218 +       {
18219 +               .aead = {
18220 +                       .base = {
18221 +                               .cra_name = "echainiv(authenc(hmac(sha224),"
18222 +                                           "cbc(des)))",
18223 +                               .cra_driver_name = "echainiv-authenc-"
18224 +                                                  "hmac-sha224-cbc-des-"
18225 +                                                  "caam-qi2",
18226 +                               .cra_blocksize = DES_BLOCK_SIZE,
18227 +                       },
18228 +                       .setkey = aead_setkey,
18229 +                       .setauthsize = aead_setauthsize,
18230 +                       .encrypt = aead_encrypt,
18231 +                       .decrypt = aead_decrypt,
18232 +                       .ivsize = DES_BLOCK_SIZE,
18233 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18234 +               },
18235 +               .caam = {
18236 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18237 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18238 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18239 +                       .geniv = true,
18240 +               }
18241 +       },
18242 +       {
18243 +               .aead = {
18244 +                       .base = {
18245 +                               .cra_name = "authenc(hmac(sha256),cbc(des))",
18246 +                               .cra_driver_name = "authenc-hmac-sha256-"
18247 +                                                  "cbc-des-caam-qi2",
18248 +                               .cra_blocksize = DES_BLOCK_SIZE,
18249 +                       },
18250 +                       .setkey = aead_setkey,
18251 +                       .setauthsize = aead_setauthsize,
18252 +                       .encrypt = aead_encrypt,
18253 +                       .decrypt = aead_decrypt,
18254 +                       .ivsize = DES_BLOCK_SIZE,
18255 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18256 +               },
18257 +               .caam = {
18258 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18259 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18260 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18261 +               },
18262 +       },
18263 +       {
18264 +               .aead = {
18265 +                       .base = {
18266 +                               .cra_name = "echainiv(authenc(hmac(sha256),"
18267 +                                           "cbc(des)))",
18268 +                               .cra_driver_name = "echainiv-authenc-"
18269 +                                                  "hmac-sha256-cbc-des-"
18270 +                                                  "caam-qi2",
18271 +                               .cra_blocksize = DES_BLOCK_SIZE,
18272 +                       },
18273 +                       .setkey = aead_setkey,
18274 +                       .setauthsize = aead_setauthsize,
18275 +                       .encrypt = aead_encrypt,
18276 +                       .decrypt = aead_decrypt,
18277 +                       .ivsize = DES_BLOCK_SIZE,
18278 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18279 +               },
18280 +               .caam = {
18281 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18282 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18283 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18284 +                       .geniv = true,
18285 +               },
18286 +       },
18287 +       {
18288 +               .aead = {
18289 +                       .base = {
18290 +                               .cra_name = "authenc(hmac(sha384),cbc(des))",
18291 +                               .cra_driver_name = "authenc-hmac-sha384-"
18292 +                                                  "cbc-des-caam-qi2",
18293 +                               .cra_blocksize = DES_BLOCK_SIZE,
18294 +                       },
18295 +                       .setkey = aead_setkey,
18296 +                       .setauthsize = aead_setauthsize,
18297 +                       .encrypt = aead_encrypt,
18298 +                       .decrypt = aead_decrypt,
18299 +                       .ivsize = DES_BLOCK_SIZE,
18300 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18301 +               },
18302 +               .caam = {
18303 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18304 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18305 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18306 +               },
18307 +       },
18308 +       {
18309 +               .aead = {
18310 +                       .base = {
18311 +                               .cra_name = "echainiv(authenc(hmac(sha384),"
18312 +                                           "cbc(des)))",
18313 +                               .cra_driver_name = "echainiv-authenc-"
18314 +                                                  "hmac-sha384-cbc-des-"
18315 +                                                  "caam-qi2",
18316 +                               .cra_blocksize = DES_BLOCK_SIZE,
18317 +                       },
18318 +                       .setkey = aead_setkey,
18319 +                       .setauthsize = aead_setauthsize,
18320 +                       .encrypt = aead_encrypt,
18321 +                       .decrypt = aead_decrypt,
18322 +                       .ivsize = DES_BLOCK_SIZE,
18323 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18324 +               },
18325 +               .caam = {
18326 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18327 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18328 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18329 +                       .geniv = true,
18330 +               }
18331 +       },
18332 +       {
18333 +               .aead = {
18334 +                       .base = {
18335 +                               .cra_name = "authenc(hmac(sha512),cbc(des))",
18336 +                               .cra_driver_name = "authenc-hmac-sha512-"
18337 +                                                  "cbc-des-caam-qi2",
18338 +                               .cra_blocksize = DES_BLOCK_SIZE,
18339 +                       },
18340 +                       .setkey = aead_setkey,
18341 +                       .setauthsize = aead_setauthsize,
18342 +                       .encrypt = aead_encrypt,
18343 +                       .decrypt = aead_decrypt,
18344 +                       .ivsize = DES_BLOCK_SIZE,
18345 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18346 +               },
18347 +               .caam = {
18348 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18349 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18350 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18351 +               }
18352 +       },
18353 +       {
18354 +               .aead = {
18355 +                       .base = {
18356 +                               .cra_name = "echainiv(authenc(hmac(sha512),"
18357 +                                           "cbc(des)))",
18358 +                               .cra_driver_name = "echainiv-authenc-"
18359 +                                                  "hmac-sha512-cbc-des-"
18360 +                                                  "caam-qi2",
18361 +                               .cra_blocksize = DES_BLOCK_SIZE,
18362 +                       },
18363 +                       .setkey = aead_setkey,
18364 +                       .setauthsize = aead_setauthsize,
18365 +                       .encrypt = aead_encrypt,
18366 +                       .decrypt = aead_decrypt,
18367 +                       .ivsize = DES_BLOCK_SIZE,
18368 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18369 +               },
18370 +               .caam = {
18371 +                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18372 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18373 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18374 +                       .geniv = true,
18375 +               }
18376 +       },
18377 +       {
18378 +               .aead = {
18379 +                       .base = {
18380 +                               .cra_name = "authenc(hmac(md5),"
18381 +                                           "rfc3686(ctr(aes)))",
18382 +                               .cra_driver_name = "authenc-hmac-md5-"
18383 +                                                  "rfc3686-ctr-aes-caam-qi2",
18384 +                               .cra_blocksize = 1,
18385 +                       },
18386 +                       .setkey = aead_setkey,
18387 +                       .setauthsize = aead_setauthsize,
18388 +                       .encrypt = aead_encrypt,
18389 +                       .decrypt = aead_decrypt,
18390 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18391 +                       .maxauthsize = MD5_DIGEST_SIZE,
18392 +               },
18393 +               .caam = {
18394 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18395 +                                          OP_ALG_AAI_CTR_MOD128,
18396 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18397 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18398 +                       .rfc3686 = true,
18399 +               },
18400 +       },
18401 +       {
18402 +               .aead = {
18403 +                       .base = {
18404 +                               .cra_name = "seqiv(authenc("
18405 +                                           "hmac(md5),rfc3686(ctr(aes))))",
18406 +                               .cra_driver_name = "seqiv-authenc-hmac-md5-"
18407 +                                                  "rfc3686-ctr-aes-caam-qi2",
18408 +                               .cra_blocksize = 1,
18409 +                       },
18410 +                       .setkey = aead_setkey,
18411 +                       .setauthsize = aead_setauthsize,
18412 +                       .encrypt = aead_encrypt,
18413 +                       .decrypt = aead_decrypt,
18414 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18415 +                       .maxauthsize = MD5_DIGEST_SIZE,
18416 +               },
18417 +               .caam = {
18418 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18419 +                                          OP_ALG_AAI_CTR_MOD128,
18420 +                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18421 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18422 +                       .rfc3686 = true,
18423 +                       .geniv = true,
18424 +               },
18425 +       },
18426 +       {
18427 +               .aead = {
18428 +                       .base = {
18429 +                               .cra_name = "authenc(hmac(sha1),"
18430 +                                           "rfc3686(ctr(aes)))",
18431 +                               .cra_driver_name = "authenc-hmac-sha1-"
18432 +                                                  "rfc3686-ctr-aes-caam-qi2",
18433 +                               .cra_blocksize = 1,
18434 +                       },
18435 +                       .setkey = aead_setkey,
18436 +                       .setauthsize = aead_setauthsize,
18437 +                       .encrypt = aead_encrypt,
18438 +                       .decrypt = aead_decrypt,
18439 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18440 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18441 +               },
18442 +               .caam = {
18443 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18444 +                                          OP_ALG_AAI_CTR_MOD128,
18445 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18446 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18447 +                       .rfc3686 = true,
18448 +               },
18449 +       },
18450 +       {
18451 +               .aead = {
18452 +                       .base = {
18453 +                               .cra_name = "seqiv(authenc("
18454 +                                           "hmac(sha1),rfc3686(ctr(aes))))",
18455 +                               .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18456 +                                                  "rfc3686-ctr-aes-caam-qi2",
18457 +                               .cra_blocksize = 1,
18458 +                       },
18459 +                       .setkey = aead_setkey,
18460 +                       .setauthsize = aead_setauthsize,
18461 +                       .encrypt = aead_encrypt,
18462 +                       .decrypt = aead_decrypt,
18463 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18464 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18465 +               },
18466 +               .caam = {
18467 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18468 +                                          OP_ALG_AAI_CTR_MOD128,
18469 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18470 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18471 +                       .rfc3686 = true,
18472 +                       .geniv = true,
18473 +               },
18474 +       },
18475 +       {
18476 +               .aead = {
18477 +                       .base = {
18478 +                               .cra_name = "authenc(hmac(sha224),"
18479 +                                           "rfc3686(ctr(aes)))",
18480 +                               .cra_driver_name = "authenc-hmac-sha224-"
18481 +                                                  "rfc3686-ctr-aes-caam-qi2",
18482 +                               .cra_blocksize = 1,
18483 +                       },
18484 +                       .setkey = aead_setkey,
18485 +                       .setauthsize = aead_setauthsize,
18486 +                       .encrypt = aead_encrypt,
18487 +                       .decrypt = aead_decrypt,
18488 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18489 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18490 +               },
18491 +               .caam = {
18492 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18493 +                                          OP_ALG_AAI_CTR_MOD128,
18494 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18495 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18496 +                       .rfc3686 = true,
18497 +               },
18498 +       },
18499 +       {
18500 +               .aead = {
18501 +                       .base = {
18502 +                               .cra_name = "seqiv(authenc("
18503 +                                           "hmac(sha224),rfc3686(ctr(aes))))",
18504 +                               .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18505 +                                                  "rfc3686-ctr-aes-caam-qi2",
18506 +                               .cra_blocksize = 1,
18507 +                       },
18508 +                       .setkey = aead_setkey,
18509 +                       .setauthsize = aead_setauthsize,
18510 +                       .encrypt = aead_encrypt,
18511 +                       .decrypt = aead_decrypt,
18512 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18513 +                       .maxauthsize = SHA224_DIGEST_SIZE,
18514 +               },
18515 +               .caam = {
18516 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18517 +                                          OP_ALG_AAI_CTR_MOD128,
18518 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18519 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18520 +                       .rfc3686 = true,
18521 +                       .geniv = true,
18522 +               },
18523 +       },
18524 +       {
18525 +               .aead = {
18526 +                       .base = {
18527 +                               .cra_name = "authenc(hmac(sha256),"
18528 +                                           "rfc3686(ctr(aes)))",
18529 +                               .cra_driver_name = "authenc-hmac-sha256-"
18530 +                                                  "rfc3686-ctr-aes-caam-qi2",
18531 +                               .cra_blocksize = 1,
18532 +                       },
18533 +                       .setkey = aead_setkey,
18534 +                       .setauthsize = aead_setauthsize,
18535 +                       .encrypt = aead_encrypt,
18536 +                       .decrypt = aead_decrypt,
18537 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18538 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18539 +               },
18540 +               .caam = {
18541 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18542 +                                          OP_ALG_AAI_CTR_MOD128,
18543 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18544 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18545 +                       .rfc3686 = true,
18546 +               },
18547 +       },
18548 +       {
18549 +               .aead = {
18550 +                       .base = {
18551 +                               .cra_name = "seqiv(authenc(hmac(sha256),"
18552 +                                           "rfc3686(ctr(aes))))",
18553 +                               .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18554 +                                                  "rfc3686-ctr-aes-caam-qi2",
18555 +                               .cra_blocksize = 1,
18556 +                       },
18557 +                       .setkey = aead_setkey,
18558 +                       .setauthsize = aead_setauthsize,
18559 +                       .encrypt = aead_encrypt,
18560 +                       .decrypt = aead_decrypt,
18561 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18562 +                       .maxauthsize = SHA256_DIGEST_SIZE,
18563 +               },
18564 +               .caam = {
18565 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18566 +                                          OP_ALG_AAI_CTR_MOD128,
18567 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18568 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18569 +                       .rfc3686 = true,
18570 +                       .geniv = true,
18571 +               },
18572 +       },
18573 +       {
18574 +               .aead = {
18575 +                       .base = {
18576 +                               .cra_name = "authenc(hmac(sha384),"
18577 +                                           "rfc3686(ctr(aes)))",
18578 +                               .cra_driver_name = "authenc-hmac-sha384-"
18579 +                                                  "rfc3686-ctr-aes-caam-qi2",
18580 +                               .cra_blocksize = 1,
18581 +                       },
18582 +                       .setkey = aead_setkey,
18583 +                       .setauthsize = aead_setauthsize,
18584 +                       .encrypt = aead_encrypt,
18585 +                       .decrypt = aead_decrypt,
18586 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18587 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18588 +               },
18589 +               .caam = {
18590 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18591 +                                          OP_ALG_AAI_CTR_MOD128,
18592 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18593 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18594 +                       .rfc3686 = true,
18595 +               },
18596 +       },
18597 +       {
18598 +               .aead = {
18599 +                       .base = {
18600 +                               .cra_name = "seqiv(authenc(hmac(sha384),"
18601 +                                           "rfc3686(ctr(aes))))",
18602 +                               .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18603 +                                                  "rfc3686-ctr-aes-caam-qi2",
18604 +                               .cra_blocksize = 1,
18605 +                       },
18606 +                       .setkey = aead_setkey,
18607 +                       .setauthsize = aead_setauthsize,
18608 +                       .encrypt = aead_encrypt,
18609 +                       .decrypt = aead_decrypt,
18610 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18611 +                       .maxauthsize = SHA384_DIGEST_SIZE,
18612 +               },
18613 +               .caam = {
18614 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18615 +                                          OP_ALG_AAI_CTR_MOD128,
18616 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18617 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18618 +                       .rfc3686 = true,
18619 +                       .geniv = true,
18620 +               },
18621 +       },
18622 +       {
18623 +               .aead = {
18624 +                       .base = {
18625 +                               .cra_name = "authenc(hmac(sha512),"
18626 +                                           "rfc3686(ctr(aes)))",
18627 +                               .cra_driver_name = "authenc-hmac-sha512-"
18628 +                                                  "rfc3686-ctr-aes-caam-qi2",
18629 +                               .cra_blocksize = 1,
18630 +                       },
18631 +                       .setkey = aead_setkey,
18632 +                       .setauthsize = aead_setauthsize,
18633 +                       .encrypt = aead_encrypt,
18634 +                       .decrypt = aead_decrypt,
18635 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18636 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18637 +               },
18638 +               .caam = {
18639 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18640 +                                          OP_ALG_AAI_CTR_MOD128,
18641 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18642 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18643 +                       .rfc3686 = true,
18644 +               },
18645 +       },
18646 +       {
18647 +               .aead = {
18648 +                       .base = {
18649 +                               .cra_name = "seqiv(authenc(hmac(sha512),"
18650 +                                           "rfc3686(ctr(aes))))",
18651 +                               .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18652 +                                                  "rfc3686-ctr-aes-caam-qi2",
18653 +                               .cra_blocksize = 1,
18654 +                       },
18655 +                       .setkey = aead_setkey,
18656 +                       .setauthsize = aead_setauthsize,
18657 +                       .encrypt = aead_encrypt,
18658 +                       .decrypt = aead_decrypt,
18659 +                       .ivsize = CTR_RFC3686_IV_SIZE,
18660 +                       .maxauthsize = SHA512_DIGEST_SIZE,
18661 +               },
18662 +               .caam = {
18663 +                       .class1_alg_type = OP_ALG_ALGSEL_AES |
18664 +                                          OP_ALG_AAI_CTR_MOD128,
18665 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18666 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18667 +                       .rfc3686 = true,
18668 +                       .geniv = true,
18669 +               },
18670 +       },
18671 +       {
18672 +               .aead = {
18673 +                       .base = {
18674 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
18675 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18676 +                               .cra_blocksize = AES_BLOCK_SIZE,
18677 +                       },
18678 +                       .setkey = tls_setkey,
18679 +                       .setauthsize = tls_setauthsize,
18680 +                       .encrypt = tls_encrypt,
18681 +                       .decrypt = tls_decrypt,
18682 +                       .ivsize = AES_BLOCK_SIZE,
18683 +                       .maxauthsize = SHA1_DIGEST_SIZE,
18684 +               },
18685 +               .caam = {
18686 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18687 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18688 +                                          OP_ALG_AAI_HMAC_PRECOMP,
18689 +               },
18690 +       },
18691 +};
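/*
 * Illustrative sketch (editor's inset, not part of the patch): once the
 * driver registers the templates above, each entry is visible by its
 * cra_name/cra_driver_name. A quick way to confirm that a given driver
 * name actually registered is to scan /proc/crypto from userspace:
 */
#include <stdio.h>
#include <string.h>

static int caam_qi2_alg_registered(const char *drv_name)
{
	char line[256];
	FILE *f = fopen("/proc/crypto", "r");
	int found = 0;

	if (!f)
		return 0;
	while (!found && fgets(line, sizeof(line), f))
		found = strstr(line, drv_name) != NULL;
	fclose(f);
	return found;
}

/* e.g. caam_qi2_alg_registered("tls10-hmac-sha1-cbc-aes-caam-qi2") */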
18692 +
18693 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18694 +                                             *template)
18695 +{
18696 +       struct caam_crypto_alg *t_alg;
18697 +       struct crypto_alg *alg;
18698 +
18699 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18700 +       if (!t_alg)
18701 +               return ERR_PTR(-ENOMEM);
18702 +
18703 +       alg = &t_alg->crypto_alg;
18704 +
18705 +       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18706 +       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18707 +                template->driver_name);
18708 +       alg->cra_module = THIS_MODULE;
18709 +       alg->cra_exit = caam_cra_exit;
18710 +       alg->cra_priority = CAAM_CRA_PRIORITY;
18711 +       alg->cra_blocksize = template->blocksize;
18712 +       alg->cra_alignmask = 0;
18713 +       alg->cra_ctxsize = sizeof(struct caam_ctx);
18714 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18715 +                        template->type;
18716 +       switch (template->type) {
18717 +       case CRYPTO_ALG_TYPE_GIVCIPHER:
18718 +               alg->cra_init = caam_cra_init_ablkcipher;
18719 +               alg->cra_type = &crypto_givcipher_type;
18720 +               alg->cra_ablkcipher = template->template_ablkcipher;
18721 +               break;
18722 +       case CRYPTO_ALG_TYPE_ABLKCIPHER:
18723 +               alg->cra_init = caam_cra_init_ablkcipher;
18724 +               alg->cra_type = &crypto_ablkcipher_type;
18725 +               alg->cra_ablkcipher = template->template_ablkcipher;
18726 +               break;
18727 +       }
18728 +
18729 +       t_alg->caam.class1_alg_type = template->class1_alg_type;
18730 +       t_alg->caam.class2_alg_type = template->class2_alg_type;
18731 +
18732 +       return t_alg;
18733 +}
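/*
 * Hedged sketch of the expected caller pattern (the real registration
 * loop lives elsewhere in this patch; register_one_alg() is a
 * hypothetical helper): caam_alg_alloc() reports failure through
 * ERR_PTR(), so callers must test with IS_ERR(), never against NULL,
 * before handing the result to the crypto API.
 */
static int register_one_alg(struct caam_alg_template *tmpl)
{
	struct caam_crypto_alg *t_alg = caam_alg_alloc(tmpl);

	if (IS_ERR(t_alg))
		return PTR_ERR(t_alg);

	return crypto_register_alg(&t_alg->crypto_alg);
}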
18734 +
18735 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18736 +{
18737 +       struct aead_alg *alg = &t_alg->aead;
18738 +
18739 +       alg->base.cra_module = THIS_MODULE;
18740 +       alg->base.cra_priority = CAAM_CRA_PRIORITY;
18741 +       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18742 +       alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18743 +
18744 +       alg->init = caam_cra_init_aead;
18745 +       alg->exit = caam_cra_exit_aead;
18746 +}
18747 +
18748 +/* max hash key is max split key size */
18749 +#define CAAM_MAX_HASH_KEY_SIZE         (SHA512_DIGEST_SIZE * 2)
18750 +
18751 +#define CAAM_MAX_HASH_BLOCK_SIZE       SHA512_BLOCK_SIZE
18752 +#define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
18753 +
18754 +#define DESC_HASH_MAX_USED_BYTES       (DESC_AHASH_FINAL_LEN + \
18755 +                                        CAAM_MAX_HASH_KEY_SIZE)
18756 +#define DESC_HASH_MAX_USED_LEN         (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
18757 +
18758 +/* caam context sizes for hashes: running digest + 8 */
18759 +#define HASH_MSG_LEN                   8
18760 +#define MAX_CTX_LEN                    (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
18761 +
18762 +enum hash_optype {
18763 +       UPDATE = 0,
18764 +       UPDATE_FIRST,
18765 +       FINALIZE,
18766 +       DIGEST,
18767 +       HASH_NUM_OP
18768 +};
18769 +
18770 +/**
18771 + * caam_hash_ctx - ahash per-session context
18772 + * @flc: Flow Contexts array
18773 + * @flc_dma: I/O virtual addresses of the Flow Contexts
18774 + * @key:  virtual address of the authentication key
18775 + * @dev: dpseci device
18776 + * @ctx_len: size of Context Register
18777 + * @adata: hashing algorithm details
18778 + */
18779 +struct caam_hash_ctx {
18780 +       struct caam_flc flc[HASH_NUM_OP];
18781 +       dma_addr_t flc_dma[HASH_NUM_OP];
18782 +       u8 key[CAAM_MAX_HASH_KEY_SIZE];
18783 +       struct device *dev;
18784 +       int ctx_len;
18785 +       struct alginfo adata;
18786 +};
18787 +
18788 +/* ahash state */
18789 +struct caam_hash_state {
18790 +       struct caam_request caam_req;
18791 +       dma_addr_t buf_dma;
18792 +       dma_addr_t ctx_dma;
18793 +       u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18794 +       int buflen_0;
18795 +       u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18796 +       int buflen_1;
18797 +       u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
18798 +       int (*update)(struct ahash_request *req);
18799 +       int (*final)(struct ahash_request *req);
18800 +       int (*finup)(struct ahash_request *req);
18801 +       int current_buf;
18802 +};
18803 +
18804 +struct caam_export_state {
18805 +       u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
18806 +       u8 caam_ctx[MAX_CTX_LEN];
18807 +       int buflen;
18808 +       int (*update)(struct ahash_request *req);
18809 +       int (*final)(struct ahash_request *req);
18810 +       int (*finup)(struct ahash_request *req);
18811 +};
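/*
 * Sketch only: caam_export_state is the opaque blob that the generic
 * crypto_ahash_export()/crypto_ahash_import() pair round-trips for this
 * driver (the export/import handlers appear later in this patch), so a
 * partially hashed stream can be parked and resumed. save_and_resume()
 * below is a hypothetical caller; blob must hold
 * crypto_ahash_statesize() bytes.
 */
static int save_and_resume(struct ahash_request *req, void *blob)
{
	int ret = crypto_ahash_export(req, blob);	/* serialize state */

	if (ret)
		return ret;
	/* ... later, possibly after reallocating the request ... */
	return crypto_ahash_import(req, blob);		/* restore and go on */
}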
18812 +
18813 +static inline void switch_buf(struct caam_hash_state *state)
18814 +{
18815 +       state->current_buf ^= 1;
18816 +}
18817 +
18818 +static inline u8 *current_buf(struct caam_hash_state *state)
18819 +{
18820 +       return state->current_buf ? state->buf_1 : state->buf_0;
18821 +}
18822 +
18823 +static inline u8 *alt_buf(struct caam_hash_state *state)
18824 +{
18825 +       return state->current_buf ? state->buf_0 : state->buf_1;
18826 +}
18827 +
18828 +static inline int *current_buflen(struct caam_hash_state *state)
18829 +{
18830 +       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
18831 +}
18832 +
18833 +static inline int *alt_buflen(struct caam_hash_state *state)
18834 +{
18835 +       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
18836 +}
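/*
 * Minimal userspace model of the ping-pong buffering above (editor's
 * sketch): one buffer holds the residual bytes feeding the in-flight
 * job, the other stages the residue for the job after it; switch_buf()
 * swaps the two roles once a job completes.
 */
#include <assert.h>

struct two_bufs { int cur; int len[2]; };

int main(void)
{
	struct two_bufs s = { 0, { 5, 0 } };	/* 5 residual bytes in buf_0 */

	assert(s.len[s.cur] == 5);	/* current_buflen() */
	s.len[!s.cur] = 3;		/* stage next residue, alt_buflen() */
	s.cur ^= 1;			/* switch_buf() after completion */
	assert(s.len[s.cur] == 3);	/* staged residue is now current */
	return 0;
}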
18837 +
18838 +/* Map current buffer in state (if length > 0) and put it in the link table */
18839 +static inline int buf_map_to_qm_sg(struct device *dev,
18840 +                                  struct dpaa2_sg_entry *qm_sg,
18841 +                                  struct caam_hash_state *state)
18842 +{
18843 +       int buflen = *current_buflen(state);
18844 +
18845 +       if (!buflen)
18846 +               return 0;
18847 +
18848 +       state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
18849 +                                       DMA_TO_DEVICE);
18850 +       if (dma_mapping_error(dev, state->buf_dma)) {
18851 +               dev_err(dev, "unable to map buf\n");
18852 +               state->buf_dma = 0;
18853 +               return -ENOMEM;
18854 +       }
18855 +
18856 +       dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
18857 +
18858 +       return 0;
18859 +}
18860 +
18861 +/* Map state->caam_ctx and add it to the link table */
18862 +static inline int ctx_map_to_qm_sg(struct device *dev,
18863 +                                  struct caam_hash_state *state, int ctx_len,
18864 +                                  struct dpaa2_sg_entry *qm_sg, u32 flag)
18865 +{
18866 +       state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
18867 +       if (dma_mapping_error(dev, state->ctx_dma)) {
18868 +               dev_err(dev, "unable to map ctx\n");
18869 +               state->ctx_dma = 0;
18870 +               return -ENOMEM;
18871 +       }
18872 +
18873 +       dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
18874 +
18875 +       return 0;
18876 +}
18877 +
18878 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
18879 +{
18880 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
18881 +       int digestsize = crypto_ahash_digestsize(ahash);
18882 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
18883 +       struct caam_flc *flc;
18884 +       u32 *desc;
18885 +
18886 +       ctx->adata.key_virt = ctx->key;
18887 +       ctx->adata.key_inline = true;
18888 +
18889 +       /* ahash_update shared descriptor */
18890 +       flc = &ctx->flc[UPDATE];
18891 +       desc = flc->sh_desc;
18892 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
18893 +                         ctx->ctx_len, true, priv->sec_attr.era);
18894 +       flc->flc[1] = desc_len(desc); /* SDL */
18895 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
18896 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
18897 +#ifdef DEBUG
18898 +       print_hex_dump(KERN_ERR,
18899 +                      "ahash update shdesc@" __stringify(__LINE__)": ",
18900 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18901 +#endif
18902 +
18903 +       /* ahash_update_first shared descriptor */
18904 +       flc = &ctx->flc[UPDATE_FIRST];
18905 +       desc = flc->sh_desc;
18906 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
18907 +                         ctx->ctx_len, false, priv->sec_attr.era);
18908 +       flc->flc[1] = desc_len(desc); /* SDL */
18909 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
18910 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
18911 +#ifdef DEBUG
18912 +       print_hex_dump(KERN_ERR,
18913 +                      "ahash update first shdesc@" __stringify(__LINE__)": ",
18914 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18915 +#endif
18916 +
18917 +       /* ahash_final shared descriptor */
18918 +       flc = &ctx->flc[FINALIZE];
18919 +       desc = flc->sh_desc;
18920 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
18921 +                         ctx->ctx_len, true, priv->sec_attr.era);
18922 +       flc->flc[1] = desc_len(desc); /* SDL */
18923 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
18924 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
18925 +#ifdef DEBUG
18926 +       print_hex_dump(KERN_ERR,
18927 +                      "ahash final shdesc@" __stringify(__LINE__)": ",
18928 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18929 +#endif
18930 +
18931 +       /* ahash_digest shared descriptor */
18932 +       flc = &ctx->flc[DIGEST];
18933 +       desc = flc->sh_desc;
18934 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
18935 +                         ctx->ctx_len, false, priv->sec_attr.era);
18936 +       flc->flc[1] = desc_len(desc); /* SDL */
18937 +       dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
18938 +                                  desc_bytes(desc), DMA_BIDIRECTIONAL);
18939 +#ifdef DEBUG
18940 +       print_hex_dump(KERN_ERR,
18941 +                      "ahash digest shdesc@" __stringify(__LINE__)": ",
18942 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18943 +#endif
18944 +
18945 +       return 0;
18946 +}
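/*
 * Recap of how the four flow contexts built above map onto the ahash
 * API, from the OP_ALG_AS_* mode each descriptor is constructed with:
 *   UPDATE       - OP_ALG_AS_UPDATE:    .update() with a running context
 *   UPDATE_FIRST - OP_ALG_AS_INIT:      first .update(), no prior state
 *   FINALIZE     - OP_ALG_AS_FINALIZE:  .final()/.finup(), emits digest
 *   DIGEST       - OP_ALG_AS_INITFINAL: one-shot .digest()
 */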
18947 +
18948 +/* Hash the key down to digestsize bytes when it is too long */
18949 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
18950 +                          u32 *keylen, u8 *key_out, u32 digestsize)
18951 +{
18952 +       struct caam_request *req_ctx;
18953 +       u32 *desc;
18954 +       struct split_key_sh_result result;
18955 +       dma_addr_t src_dma, dst_dma;
18956 +       struct caam_flc *flc;
18957 +       dma_addr_t flc_dma;
18958 +       int ret = -ENOMEM;
18959 +       struct dpaa2_fl_entry *in_fle, *out_fle;
18960 +
18961 +       req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
18962 +       if (!req_ctx)
18963 +               return -ENOMEM;
18964 +
18965 +       in_fle = &req_ctx->fd_flt[1];
18966 +       out_fle = &req_ctx->fd_flt[0];
18967 +
18968 +       flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
18969 +       if (!flc)
18970 +               goto err_flc;
18971 +
18972 +       src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
18973 +                                DMA_TO_DEVICE);
18974 +       if (dma_mapping_error(ctx->dev, src_dma)) {
18975 +               dev_err(ctx->dev, "unable to map key input memory\n");
18976 +               goto err_src_dma;
18977 +       }
18978 +       dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
18979 +                                DMA_FROM_DEVICE);
18980 +       if (dma_mapping_error(ctx->dev, dst_dma)) {
18981 +               dev_err(ctx->dev, "unable to map key output memory\n");
18982 +               goto err_dst_dma;
18983 +       }
18984 +
18985 +       desc = flc->sh_desc;
18986 +
18987 +       init_sh_desc(desc, 0);
18988 +
18989 +       /* descriptor to perform unkeyed hash on key_in */
18990 +       append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
18991 +                        OP_ALG_AS_INITFINAL);
18992 +       append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
18993 +                            FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
18994 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
18995 +                        LDST_SRCDST_BYTE_CONTEXT);
18996 +
18997 +       flc->flc[1] = desc_len(desc); /* SDL */
18998 +       flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
18999 +                                desc_bytes(desc), DMA_TO_DEVICE);
19000 +       if (dma_mapping_error(ctx->dev, flc_dma)) {
19001 +               dev_err(ctx->dev, "unable to map shared descriptor\n");
19002 +               goto err_flc_dma;
19003 +       }
19004 +
19005 +       dpaa2_fl_set_final(in_fle, true);
19006 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19007 +       dpaa2_fl_set_addr(in_fle, src_dma);
19008 +       dpaa2_fl_set_len(in_fle, *keylen);
19009 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19010 +       dpaa2_fl_set_addr(out_fle, dst_dma);
19011 +       dpaa2_fl_set_len(out_fle, digestsize);
19012 +
19013 +#ifdef DEBUG
19014 +       print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
19015 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
19016 +       print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
19017 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19018 +#endif
19019 +
19020 +       result.err = 0;
19021 +       init_completion(&result.completion);
19022 +       result.dev = ctx->dev;
19023 +
19024 +       req_ctx->flc = flc;
19025 +       req_ctx->flc_dma = flc_dma;
19026 +       req_ctx->cbk = split_key_sh_done;
19027 +       req_ctx->ctx = &result;
19028 +
19029 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19030 +       if (ret == -EINPROGRESS) {
19031 +               /* queued; wait for the key-digest job to complete */
19032 +               wait_for_completion(&result.completion);
19033 +               ret = result.err;
19034 +#ifdef DEBUG
19035 +               print_hex_dump(KERN_ERR,
19036 +                              "digested key@" __stringify(__LINE__)": ",
19037 +                              DUMP_PREFIX_ADDRESS, 16, 4, key_out, digestsize,
19038 +                              1);
19039 +#endif
19040 +       }
19041 +
19042 +       dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
19043 +                        DMA_TO_DEVICE);
19044 +err_flc_dma:
19045 +       dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
19046 +err_dst_dma:
19047 +       dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
19048 +err_src_dma:
19049 +       kfree(flc);
19050 +err_flc:
19051 +       kfree(req_ctx);
19052 +
19053 +       *keylen = digestsize;
19054 +
19055 +       return ret;
19056 +}
19057 +
19058 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
19059 +                       unsigned int keylen)
19060 +{
19061 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19062 +       unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
19063 +       unsigned int digestsize = crypto_ahash_digestsize(ahash);
19064 +       int ret;
19065 +       u8 *hashed_key = NULL;
19066 +
19067 +#ifdef DEBUG
19068 +       dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
19069 +#endif
19070 +
19071 +       if (keylen > blocksize) {
19072 +               hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
19073 +                                          GFP_KERNEL | GFP_DMA);
19074 +               if (!hashed_key)
19075 +                       return -ENOMEM;
19076 +               ret = hash_digest_key(ctx, key, &keylen, hashed_key,
19077 +                                     digestsize);
19078 +               if (ret)
19079 +                       goto bad_free_key;
19080 +               key = hashed_key;
19081 +       }
19082 +
19083 +       ctx->adata.keylen = keylen;
19084 +       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
19085 +                                             OP_ALG_ALGSEL_MASK);
19086 +       if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
19087 +               goto bad_free_key;
19088 +
19089 +       memcpy(ctx->key, key, keylen);
19090 +
19091 +       kfree(hashed_key);
19092 +       return ahash_set_sh_desc(ahash);
19093 +bad_free_key:
19094 +       kfree(hashed_key);
19095 +       crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
19096 +       return -EINVAL;
19097 +}
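/*
 * Userspace view of the same keying rule (editor's sketch, error
 * handling omitted): HMAC keys longer than the hash block size are
 * digested first, which is exactly what ahash_setkey() does via
 * hash_digest_key(). With AF_ALG the kernel applies that reduction
 * internally, so the long key can be handed over as-is:
 */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha256)",
	};
	unsigned char key[100] = { 0 };	/* 100 > SHA-256 block size (64) */
	unsigned char mac[32];
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0), opfd;

	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);
	write(opfd, "abc", 3);		/* message */
	read(opfd, mac, sizeof(mac));	/* HMAC-SHA256 digest */
	close(opfd);
	close(tfmfd);
	return 0;
}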
19098 +
19099 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
19100 +                              struct ahash_request *req, int dst_len)
19101 +{
19102 +       struct caam_hash_state *state = ahash_request_ctx(req);
19103 +
19104 +       if (edesc->src_nents)
19105 +               dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
19106 +       if (edesc->dst_dma)
19107 +               dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
19108 +
19109 +       if (edesc->qm_sg_bytes)
19110 +               dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
19111 +                                DMA_TO_DEVICE);
19112 +
19113 +       if (state->buf_dma) {
19114 +               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
19115 +                                DMA_TO_DEVICE);
19116 +               state->buf_dma = 0;
19117 +       }
19118 +}
19119 +
19120 +static inline void ahash_unmap_ctx(struct device *dev,
19121 +                                  struct ahash_edesc *edesc,
19122 +                                  struct ahash_request *req, int dst_len,
19123 +                                  u32 flag)
19124 +{
19125 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19126 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19127 +       struct caam_hash_state *state = ahash_request_ctx(req);
19128 +
19129 +       if (state->ctx_dma) {
19130 +               dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
19131 +               state->ctx_dma = 0;
19132 +       }
19133 +       ahash_unmap(dev, edesc, req, dst_len);
19134 +}
19135 +
19136 +static void ahash_done(void *cbk_ctx, u32 status)
19137 +{
19138 +       struct crypto_async_request *areq = cbk_ctx;
19139 +       struct ahash_request *req = ahash_request_cast(areq);
19140 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19141 +       struct caam_hash_state *state = ahash_request_ctx(req);
19142 +       struct ahash_edesc *edesc = state->caam_req.edesc;
19143 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19144 +       int digestsize = crypto_ahash_digestsize(ahash);
19145 +       int ecode = 0;
19146 +
19147 +#ifdef DEBUG
19148 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19149 +#endif
19150 +
19151 +       if (unlikely(status)) {
19152 +               caam_qi2_strstatus(ctx->dev, status);
19153 +               ecode = -EIO;
19154 +       }
19155 +
19156 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
19157 +       qi_cache_free(edesc);
19158 +
19159 +#ifdef DEBUG
19160 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19161 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19162 +                      ctx->ctx_len, 1);
19163 +       if (req->result)
19164 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19165 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19166 +                              digestsize, 1);
19167 +#endif
19168 +
19169 +       req->base.complete(&req->base, ecode);
19170 +}
19171 +
19172 +static void ahash_done_bi(void *cbk_ctx, u32 status)
19173 +{
19174 +       struct crypto_async_request *areq = cbk_ctx;
19175 +       struct ahash_request *req = ahash_request_cast(areq);
19176 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19177 +       struct caam_hash_state *state = ahash_request_ctx(req);
19178 +       struct ahash_edesc *edesc = state->caam_req.edesc;
19179 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19180 +       int ecode = 0;
19181 +#ifdef DEBUG
19182 +       int digestsize = crypto_ahash_digestsize(ahash);
19183 +
19184 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19185 +#endif
19186 +
19187 +       if (unlikely(status)) {
19188 +               caam_qi2_strstatus(ctx->dev, status);
19189 +               ecode = -EIO;
19190 +       }
19191 +
19192 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19193 +       switch_buf(state);
19194 +       qi_cache_free(edesc);
19195 +
19196 +#ifdef DEBUG
19197 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19198 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19199 +                      ctx->ctx_len, 1);
19200 +       if (req->result)
19201 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19202 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19203 +                              digestsize, 1);
19204 +#endif
19205 +
19206 +       req->base.complete(&req->base, ecode);
19207 +}
19208 +
19209 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
19210 +{
19211 +       struct crypto_async_request *areq = cbk_ctx;
19212 +       struct ahash_request *req = ahash_request_cast(areq);
19213 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19214 +       struct caam_hash_state *state = ahash_request_ctx(req);
19215 +       struct ahash_edesc *edesc = state->caam_req.edesc;
19216 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19217 +       int digestsize = crypto_ahash_digestsize(ahash);
19218 +       int ecode = 0;
19219 +
19220 +#ifdef DEBUG
19221 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19222 +#endif
19223 +
19224 +       if (unlikely(status)) {
19225 +               caam_qi2_strstatus(ctx->dev, status);
19226 +               ecode = -EIO;
19227 +       }
19228 +
19229 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
19230 +       qi_cache_free(edesc);
19231 +
19232 +#ifdef DEBUG
19233 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19234 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19235 +                      ctx->ctx_len, 1);
19236 +       if (req->result)
19237 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19238 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19239 +                              digestsize, 1);
19240 +#endif
19241 +
19242 +       req->base.complete(&req->base, ecode);
19243 +}
19244 +
19245 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
19246 +{
19247 +       struct crypto_async_request *areq = cbk_ctx;
19248 +       struct ahash_request *req = ahash_request_cast(areq);
19249 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19250 +       struct caam_hash_state *state = ahash_request_ctx(req);
19251 +       struct ahash_edesc *edesc = state->caam_req.edesc;
19252 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19253 +       int ecode = 0;
19254 +#ifdef DEBUG
19255 +       int digestsize = crypto_ahash_digestsize(ahash);
19256 +
19257 +       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19258 +#endif
19259 +
19260 +       if (unlikely(status)) {
19261 +               caam_qi2_strstatus(ctx->dev, status);
19262 +               ecode = -EIO;
19263 +       }
19264 +
19265 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
19266 +       switch_buf(state);
19267 +       qi_cache_free(edesc);
19268 +
19269 +#ifdef DEBUG
19270 +       print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19271 +                      DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19272 +                      ctx->ctx_len, 1);
19273 +       if (req->result)
19274 +               print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19275 +                              DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19276 +                              digestsize, 1);
19277 +#endif
19278 +
19279 +       req->base.complete(&req->base, ecode);
19280 +}
19281 +
19282 +static int ahash_update_ctx(struct ahash_request *req)
19283 +{
19284 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19285 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19286 +       struct caam_hash_state *state = ahash_request_ctx(req);
19287 +       struct caam_request *req_ctx = &state->caam_req;
19288 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19289 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19290 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19291 +                     GFP_KERNEL : GFP_ATOMIC;
19292 +       u8 *buf = current_buf(state);
19293 +       int *buflen = current_buflen(state);
19294 +       u8 *next_buf = alt_buf(state);
19295 +       int *next_buflen = alt_buflen(state), last_buflen;
19296 +       int in_len = *buflen + req->nbytes, to_hash;
19297 +       int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
19298 +       struct ahash_edesc *edesc;
19299 +       int ret = 0;
19300 +
19301 +       last_buflen = *next_buflen;
19302 +       *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19303 +       to_hash = in_len - *next_buflen;
19304 +
19305 +       if (to_hash) {
19306 +               struct dpaa2_sg_entry *sg_table;
19307 +
19308 +               src_nents = sg_nents_for_len(req->src,
19309 +                                            req->nbytes - (*next_buflen));
19310 +               if (src_nents < 0) {
19311 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
19312 +                       return src_nents;
19313 +               }
19314 +
19315 +               if (src_nents) {
19316 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19317 +                                                 DMA_TO_DEVICE);
19318 +                       if (!mapped_nents) {
19319 +                               dev_err(ctx->dev, "unable to DMA map source\n");
19320 +                               return -ENOMEM;
19321 +                       }
19322 +               } else {
19323 +                       mapped_nents = 0;
19324 +               }
19325 +
19326 +               /* allocate space for base edesc and link tables */
19327 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
19328 +               if (!edesc) {
19329 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
19330 +                                    DMA_TO_DEVICE);
19331 +                       return -ENOMEM;
19332 +               }
19333 +
19334 +               edesc->src_nents = src_nents;
19335 +               qm_sg_src_index = 1 + (*buflen ? 1 : 0);
19336 +               qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
19337 +                             sizeof(*sg_table);
19338 +               sg_table = &edesc->sgt[0];
19339 +
19340 +               ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19341 +                                      DMA_BIDIRECTIONAL);
19342 +               if (ret)
19343 +                       goto unmap_ctx;
19344 +
19345 +               ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19346 +               if (ret)
19347 +                       goto unmap_ctx;
19348 +
19349 +               if (mapped_nents) {
19350 +                       sg_to_qm_sg_last(req->src, mapped_nents,
19351 +                                        sg_table + qm_sg_src_index, 0);
19352 +                       if (*next_buflen)
19353 +                               scatterwalk_map_and_copy(next_buf, req->src,
19354 +                                                        to_hash - *buflen,
19355 +                                                        *next_buflen, 0);
19356 +               } else {
19357 +                       dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
19358 +                                          true);
19359 +               }
19360 +
19361 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19362 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
19363 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19364 +                       dev_err(ctx->dev, "unable to map S/G table\n");
19365 +                       ret = -ENOMEM;
19366 +                       goto unmap_ctx;
19367 +               }
19368 +               edesc->qm_sg_bytes = qm_sg_bytes;
19369 +
19370 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19371 +               dpaa2_fl_set_final(in_fle, true);
19372 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19373 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19374 +               dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
19375 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19376 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19377 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19378 +
19379 +               req_ctx->flc = &ctx->flc[UPDATE];
19380 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE];
19381 +               req_ctx->cbk = ahash_done_bi;
19382 +               req_ctx->ctx = &req->base;
19383 +               req_ctx->edesc = edesc;
19384 +
19385 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19386 +               if (ret != -EINPROGRESS &&
19387 +                   !(ret == -EBUSY &&
19388 +                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19389 +                       goto unmap_ctx;
19390 +       } else if (*next_buflen) {
19391 +               scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19392 +                                        req->nbytes, 0);
19393 +               *buflen = *next_buflen;
19394 +               *next_buflen = last_buflen;
19395 +       }
19396 +#ifdef DEBUG
19397 +       print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19398 +                      DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19399 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19400 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19401 +                      *next_buflen, 1);
19402 +#endif
19403 +
19404 +       return ret;
19405 +unmap_ctx:
19406 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19407 +       qi_cache_free(edesc);
19408 +       return ret;
19409 +}
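/*
 * The buffering arithmetic above in isolation (editor's sketch): whole
 * blocks are hashed immediately, the remainder is carried in next_buf
 * for the following call. The mask trick requires a power-of-two block
 * size, which holds for every hash handled by this driver.
 */
#include <stdio.h>

int main(void)
{
	int blocksize = 64;			/* e.g. SHA-256 */
	int buflen = 10, nbytes = 200;		/* carried over + new data */
	int in_len = buflen + nbytes;		/* 210 */
	int next_buflen = in_len & (blocksize - 1);	/* 210 % 64 = 18 */
	int to_hash = in_len - next_buflen;	/* 192, i.e. 3 full blocks */

	printf("hash now: %d, carry: %d\n", to_hash, next_buflen);
	return 0;
}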
19410 +
19411 +static int ahash_final_ctx(struct ahash_request *req)
19412 +{
19413 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19414 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19415 +       struct caam_hash_state *state = ahash_request_ctx(req);
19416 +       struct caam_request *req_ctx = &state->caam_req;
19417 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19418 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19419 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19420 +                     GFP_KERNEL : GFP_ATOMIC;
19421 +       int buflen = *current_buflen(state);
19422 +       int qm_sg_bytes, qm_sg_src_index;
19423 +       int digestsize = crypto_ahash_digestsize(ahash);
19424 +       struct ahash_edesc *edesc;
19425 +       struct dpaa2_sg_entry *sg_table;
19426 +       int ret;
19427 +
19428 +       /* allocate space for base edesc and link tables */
19429 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
19430 +       if (!edesc)
19431 +               return -ENOMEM;
19432 +
19433 +       qm_sg_src_index = 1 + (buflen ? 1 : 0);
19434 +       qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
19435 +       sg_table = &edesc->sgt[0];
19436 +
19437 +       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19438 +                              DMA_TO_DEVICE);
19439 +       if (ret)
19440 +               goto unmap_ctx;
19441 +
19442 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19443 +       if (ret)
19444 +               goto unmap_ctx;
19445 +
19446 +       dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
19447 +
19448 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19449 +                                         DMA_TO_DEVICE);
19450 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19451 +               dev_err(ctx->dev, "unable to map S/G table\n");
19452 +               ret = -ENOMEM;
19453 +               goto unmap_ctx;
19454 +       }
19455 +       edesc->qm_sg_bytes = qm_sg_bytes;
19456 +
19457 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19458 +                                       DMA_FROM_DEVICE);
19459 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19460 +               dev_err(ctx->dev, "unable to map dst\n");
19461 +               edesc->dst_dma = 0;
19462 +               ret = -ENOMEM;
19463 +               goto unmap_ctx;
19464 +       }
19465 +
19466 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19467 +       dpaa2_fl_set_final(in_fle, true);
19468 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19469 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19470 +       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
19471 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19472 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19473 +       dpaa2_fl_set_len(out_fle, digestsize);
19474 +
19475 +       req_ctx->flc = &ctx->flc[FINALIZE];
19476 +       req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19477 +       req_ctx->cbk = ahash_done_ctx_src;
19478 +       req_ctx->ctx = &req->base;
19479 +       req_ctx->edesc = edesc;
19480 +
19481 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19482 +       if (ret == -EINPROGRESS ||
19483 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19484 +               return ret;
19485 +
19486 +unmap_ctx:
19487 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19488 +       qi_cache_free(edesc);
19489 +       return ret;
19490 +}
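/*
 * Frame layout assembled above for .final() (no new source data):
 *   sg_table[0] = running context (ctx_len bytes)
 *   sg_table[1] = buffered partial block, present only if buflen != 0
 * The last entry carries the final bit, and the digest lands directly
 * in req->result through dst_dma.
 */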
19491 +
19492 +static int ahash_finup_ctx(struct ahash_request *req)
19493 +{
19494 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19495 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19496 +       struct caam_hash_state *state = ahash_request_ctx(req);
19497 +       struct caam_request *req_ctx = &state->caam_req;
19498 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19499 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19500 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19501 +                     GFP_KERNEL : GFP_ATOMIC;
19502 +       int buflen = *current_buflen(state);
19503 +       int qm_sg_bytes, qm_sg_src_index;
19504 +       int src_nents, mapped_nents;
19505 +       int digestsize = crypto_ahash_digestsize(ahash);
19506 +       struct ahash_edesc *edesc;
19507 +       struct dpaa2_sg_entry *sg_table;
19508 +       int ret;
19509 +
19510 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
19511 +       if (src_nents < 0) {
19512 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
19513 +               return src_nents;
19514 +       }
19515 +
19516 +       if (src_nents) {
19517 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19518 +                                         DMA_TO_DEVICE);
19519 +               if (!mapped_nents) {
19520 +                       dev_err(ctx->dev, "unable to DMA map source\n");
19521 +                       return -ENOMEM;
19522 +               }
19523 +       } else {
19524 +               mapped_nents = 0;
19525 +       }
19526 +
19527 +       /* allocate space for base edesc and link tables */
19528 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
19529 +       if (!edesc) {
19530 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19531 +               return -ENOMEM;
19532 +       }
19533 +
19534 +       edesc->src_nents = src_nents;
19535 +       qm_sg_src_index = 1 + (buflen ? 1 : 0);
19536 +       qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
19537 +       sg_table = &edesc->sgt[0];
19538 +
19539 +       ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19540 +                              DMA_TO_DEVICE);
19541 +       if (ret)
19542 +               goto unmap_ctx;
19543 +
19544 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19545 +       if (ret)
19546 +               goto unmap_ctx;
19547 +
19548 +       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
19549 +
19550 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19551 +                                         DMA_TO_DEVICE);
19552 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19553 +               dev_err(ctx->dev, "unable to map S/G table\n");
19554 +               ret = -ENOMEM;
19555 +               goto unmap_ctx;
19556 +       }
19557 +       edesc->qm_sg_bytes = qm_sg_bytes;
19558 +
19559 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19560 +                                       DMA_FROM_DEVICE);
19561 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19562 +               dev_err(ctx->dev, "unable to map dst\n");
19563 +               edesc->dst_dma = 0;
19564 +               ret = -ENOMEM;
19565 +               goto unmap_ctx;
19566 +       }
19567 +
19568 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19569 +       dpaa2_fl_set_final(in_fle, true);
19570 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19571 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19572 +       dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
19573 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19574 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19575 +       dpaa2_fl_set_len(out_fle, digestsize);
19576 +
19577 +       req_ctx->flc = &ctx->flc[FINALIZE];
19578 +       req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19579 +       req_ctx->cbk = ahash_done_ctx_src;
19580 +       req_ctx->ctx = &req->base;
19581 +       req_ctx->edesc = edesc;
19582 +
19583 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19584 +       if (ret == -EINPROGRESS ||
19585 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19586 +               return ret;
19587 +
19588 +unmap_ctx:
19589 +       ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19590 +       qi_cache_free(edesc);
19591 +       return ret;
19592 +}
19593 +
19594 +static int ahash_digest(struct ahash_request *req)
19595 +{
19596 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19597 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19598 +       struct caam_hash_state *state = ahash_request_ctx(req);
19599 +       struct caam_request *req_ctx = &state->caam_req;
19600 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19601 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19602 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19603 +                     GFP_KERNEL : GFP_ATOMIC;
19604 +       int digestsize = crypto_ahash_digestsize(ahash);
19605 +       int src_nents, mapped_nents;
19606 +       struct ahash_edesc *edesc;
19607 +       int ret = -ENOMEM;
19608 +
19609 +       state->buf_dma = 0;
19610 +
19611 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
19612 +       if (src_nents < 0) {
19613 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
19614 +               return src_nents;
19615 +       }
19616 +
19617 +       if (src_nents) {
19618 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19619 +                                         DMA_TO_DEVICE);
19620 +               if (!mapped_nents) {
19621 +                       dev_err(ctx->dev, "unable to map source for DMA\n");
19622 +                       return ret;
19623 +               }
19624 +       } else {
19625 +               mapped_nents = 0;
19626 +       }
19627 +
19628 +       /* allocate space for base edesc and link tables */
19629 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
19630 +       if (!edesc) {
19631 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19632 +               return ret;
19633 +       }
19634 +
19635 +       edesc->src_nents = src_nents;
19636 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19637 +
19638 +       if (mapped_nents > 1) {
19639 +               int qm_sg_bytes;
19640 +               struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
19641 +
19642 +               qm_sg_bytes = mapped_nents * sizeof(*sg_table);
19643 +               sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
19644 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19645 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
19646 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19647 +                       dev_err(ctx->dev, "unable to map S/G table\n");
19648 +                       goto unmap;
19649 +               }
19650 +               edesc->qm_sg_bytes = qm_sg_bytes;
19651 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19652 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19653 +       } else {
19654 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19655 +               dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
19656 +       }
19657 +
19658 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19659 +                                       DMA_FROM_DEVICE);
19660 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19661 +               dev_err(ctx->dev, "unable to map dst\n");
19662 +               edesc->dst_dma = 0;
19663 +               goto unmap;
19664 +       }
19665 +
19666 +       dpaa2_fl_set_final(in_fle, true);
19667 +       dpaa2_fl_set_len(in_fle, req->nbytes);
19668 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19669 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19670 +       dpaa2_fl_set_len(out_fle, digestsize);
19671 +
19672 +       req_ctx->flc = &ctx->flc[DIGEST];
19673 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19674 +       req_ctx->cbk = ahash_done;
19675 +       req_ctx->ctx = &req->base;
19676 +       req_ctx->edesc = edesc;
19677 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19678 +       if (ret == -EINPROGRESS ||
19679 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19680 +               return ret;
19681 +
19682 +unmap:
19683 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
19684 +       qi_cache_free(edesc);
19685 +       return ret;
19686 +}
19687 +
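+/*
+ * ahash_final_no_ctx() - produce the digest when all data seen so far
+ * still sits in the driver buffer and no intermediate context exists:
+ * the buffered bytes are DMA-mapped and hashed in one DIGEST operation.
+ */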
19688 +static int ahash_final_no_ctx(struct ahash_request *req)
19689 +{
19690 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19691 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19692 +       struct caam_hash_state *state = ahash_request_ctx(req);
19693 +       struct caam_request *req_ctx = &state->caam_req;
19694 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19695 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19696 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19697 +                     GFP_KERNEL : GFP_ATOMIC;
19698 +       u8 *buf = current_buf(state);
19699 +       int buflen = *current_buflen(state);
19700 +       int digestsize = crypto_ahash_digestsize(ahash);
19701 +       struct ahash_edesc *edesc;
19702 +       int ret = -ENOMEM;
19703 +
19704 +       /* allocate space for base edesc and link tables */
19705 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
19706 +       if (!edesc)
19707 +               return ret;
19708 +
19709 +       state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
19710 +       if (dma_mapping_error(ctx->dev, state->buf_dma)) {
19711 +               dev_err(ctx->dev, "unable to map src\n");
19712 +               goto unmap;
19713 +       }
19714 +
19715 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19716 +                                       DMA_FROM_DEVICE);
19717 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19718 +               dev_err(ctx->dev, "unable to map dst\n");
19719 +               edesc->dst_dma = 0;
19720 +               goto unmap;
19721 +       }
19722 +
19723 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19724 +       dpaa2_fl_set_final(in_fle, true);
19725 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19726 +       dpaa2_fl_set_addr(in_fle, state->buf_dma);
19727 +       dpaa2_fl_set_len(in_fle, buflen);
19728 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19729 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19730 +       dpaa2_fl_set_len(out_fle, digestsize);
19731 +
19732 +       req_ctx->flc = &ctx->flc[DIGEST];
19733 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19734 +       req_ctx->cbk = ahash_done;
19735 +       req_ctx->ctx = &req->base;
19736 +       req_ctx->edesc = edesc;
19737 +
19738 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19739 +       if (ret == -EINPROGRESS ||
19740 +           (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19741 +               return ret;
19742 +
19743 +unmap:
19744 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
19745 +       qi_cache_free(edesc);
19746 +       return ret;
19747 +}
19748 +
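+/*
+ * ahash_update_no_ctx() - consume request data before any running
+ * context exists. Whole blocks (buffered bytes + new data, rounded
+ * down to the block size) are hashed via the UPDATE_FIRST flow
+ * context, producing the first running context; the tail is stashed
+ * in the alternate buffer. On success the state handlers are switched
+ * to the *_ctx variants.
+ */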
19749 +static int ahash_update_no_ctx(struct ahash_request *req)
19750 +{
19751 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19752 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19753 +       struct caam_hash_state *state = ahash_request_ctx(req);
19754 +       struct caam_request *req_ctx = &state->caam_req;
19755 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19756 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19757 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19758 +                     GFP_KERNEL : GFP_ATOMIC;
19759 +       u8 *buf = current_buf(state);
19760 +       int *buflen = current_buflen(state);
19761 +       u8 *next_buf = alt_buf(state);
19762 +       int *next_buflen = alt_buflen(state);
19763 +       int in_len = *buflen + req->nbytes, to_hash;
19764 +       int qm_sg_bytes, src_nents, mapped_nents;
19765 +       struct ahash_edesc *edesc;
19766 +       int ret = 0;
19767 +
19768 +       *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19769 +       to_hash = in_len - *next_buflen;
19770 +
19771 +       if (to_hash) {
19772 +               struct dpaa2_sg_entry *sg_table;
19773 +
19774 +               src_nents = sg_nents_for_len(req->src,
19775 +                                            req->nbytes - *next_buflen);
19776 +               if (src_nents < 0) {
19777 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
19778 +                       return src_nents;
19779 +               }
19780 +
19781 +               if (src_nents) {
19782 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19783 +                                                 DMA_TO_DEVICE);
19784 +                       if (!mapped_nents) {
19785 +                               dev_err(ctx->dev, "unable to DMA map source\n");
19786 +                               return -ENOMEM;
19787 +                       }
19788 +               } else {
19789 +                       mapped_nents = 0;
19790 +               }
19791 +
19792 +               /* allocate space for base edesc and link tables */
19793 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
19794 +               if (!edesc) {
19795 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
19796 +                                    DMA_TO_DEVICE);
19797 +                       return -ENOMEM;
19798 +               }
19799 +
19800 +               edesc->src_nents = src_nents;
19801 +               qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
19802 +               sg_table = &edesc->sgt[0];
19803 +
19804 +               ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19805 +               if (ret)
19806 +                       goto unmap_ctx;
19807 +
19808 +               sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19809 +
19810 +               if (*next_buflen)
19811 +                       scatterwalk_map_and_copy(next_buf, req->src,
19812 +                                                to_hash - *buflen,
19813 +                                                *next_buflen, 0);
19814 +
19815 +               edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19816 +                                                 qm_sg_bytes, DMA_TO_DEVICE);
19817 +               if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19818 +                       dev_err(ctx->dev, "unable to map S/G table\n");
19819 +                       ret = -ENOMEM;
19820 +                       goto unmap_ctx;
19821 +               }
19822 +               edesc->qm_sg_bytes = qm_sg_bytes;
19823 +
19824 +               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
19825 +                                               ctx->ctx_len, DMA_FROM_DEVICE);
19826 +               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
19827 +                       dev_err(ctx->dev, "unable to map ctx\n");
19828 +                       state->ctx_dma = 0;
19829 +                       ret = -ENOMEM;
19830 +                       goto unmap_ctx;
19831 +               }
19832 +
19833 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19834 +               dpaa2_fl_set_final(in_fle, true);
19835 +               dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19836 +               dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19837 +               dpaa2_fl_set_len(in_fle, to_hash);
19838 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19839 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19840 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19841 +
19842 +               req_ctx->flc = &ctx->flc[UPDATE_FIRST];
19843 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
19844 +               req_ctx->cbk = ahash_done_ctx_dst;
19845 +               req_ctx->ctx = &req->base;
19846 +               req_ctx->edesc = edesc;
19847 +
19848 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19849 +               if (ret != -EINPROGRESS &&
19850 +                   !(ret == -EBUSY &&
19851 +                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19852 +                       goto unmap_ctx;
19853 +
19854 +               state->update = ahash_update_ctx;
19855 +               state->finup = ahash_finup_ctx;
19856 +               state->final = ahash_final_ctx;
19857 +       } else if (*next_buflen) {
19858 +               scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19859 +                                        req->nbytes, 0);
19860 +               *buflen = *next_buflen;
19861 +               *next_buflen = 0;
19862 +       }
19863 +#ifdef DEBUG
19864 +       print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19865 +                      DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19866 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19867 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19868 +                      *next_buflen, 1);
19869 +#endif
19870 +
19871 +       return ret;
19872 +unmap_ctx:
19873 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
19874 +       qi_cache_free(edesc);
19875 +       return ret;
19876 +}
19877 +
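+/*
+ * ahash_finup_no_ctx() - final update plus digest when no running
+ * context exists yet: the buffered bytes and the request data are
+ * chained in one S/G table and hashed in a single DIGEST operation.
+ */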
19878 +static int ahash_finup_no_ctx(struct ahash_request *req)
19879 +{
19880 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19881 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19882 +       struct caam_hash_state *state = ahash_request_ctx(req);
19883 +       struct caam_request *req_ctx = &state->caam_req;
19884 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19885 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19886 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19887 +                     GFP_KERNEL : GFP_ATOMIC;
19888 +       int buflen = *current_buflen(state);
19889 +       int qm_sg_bytes, src_nents, mapped_nents;
19890 +       int digestsize = crypto_ahash_digestsize(ahash);
19891 +       struct ahash_edesc *edesc;
19892 +       struct dpaa2_sg_entry *sg_table;
19893 +       int ret;
19894 +
19895 +       src_nents = sg_nents_for_len(req->src, req->nbytes);
19896 +       if (src_nents < 0) {
19897 +               dev_err(ctx->dev, "Invalid number of src SG.\n");
19898 +               return src_nents;
19899 +       }
19900 +
19901 +       if (src_nents) {
19902 +               mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19903 +                                         DMA_TO_DEVICE);
19904 +               if (!mapped_nents) {
19905 +                       dev_err(ctx->dev, "unable to DMA map source\n");
19906 +                       return -ENOMEM;
19907 +               }
19908 +       } else {
19909 +               mapped_nents = 0;
19910 +       }
19911 +
19912 +       /* allocate space for base edesc and link tables */
19913 +       edesc = qi_cache_zalloc(GFP_DMA | flags);
19914 +       if (!edesc) {
19915 +               dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19916 +               return -ENOMEM;
19917 +       }
19918 +
19919 +       edesc->src_nents = src_nents;
19920 +       qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
19921 +       sg_table = &edesc->sgt[0];
19922 +
19923 +       ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19924 +       if (ret)
19925 +               goto unmap;
19926 +
19927 +       sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19928 +
19929 +       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19930 +                                         DMA_TO_DEVICE);
19931 +       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19932 +               dev_err(ctx->dev, "unable to map S/G table\n");
19933 +               ret = -ENOMEM;
19934 +               goto unmap;
19935 +       }
19936 +       edesc->qm_sg_bytes = qm_sg_bytes;
19937 +
19938 +       edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19939 +                                       DMA_FROM_DEVICE);
19940 +       if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19941 +               dev_err(ctx->dev, "unable to map dst\n");
19942 +               edesc->dst_dma = 0;
19943 +               ret = -ENOMEM;
19944 +               goto unmap;
19945 +       }
19946 +
19947 +       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19948 +       dpaa2_fl_set_final(in_fle, true);
19949 +       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19950 +       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19951 +       dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
19952 +       dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19953 +       dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19954 +       dpaa2_fl_set_len(out_fle, digestsize);
19955 +
19956 +       req_ctx->flc = &ctx->flc[DIGEST];
19957 +       req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19958 +       req_ctx->cbk = ahash_done;
19959 +       req_ctx->ctx = &req->base;
19960 +       req_ctx->edesc = edesc;
19961 +       ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19962 +       if (ret != -EINPROGRESS &&
19963 +           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19964 +               goto unmap;
19965 +
19966 +       return ret;
19967 +unmap:
19968 +       ahash_unmap(ctx->dev, edesc, req, digestsize);
19969 +       qi_cache_free(edesc);
19970 +       return -ENOMEM;
19971 +}
19972 +
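+/*
+ * ahash_update_first() - very first update on a request: hash as many
+ * whole blocks as possible with the UPDATE_FIRST flow context and keep
+ * the tail in the alternate buffer. If everything fits in the buffer,
+ * only switch the state handlers to the no-ctx variants.
+ */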
19973 +static int ahash_update_first(struct ahash_request *req)
19974 +{
19975 +       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19976 +       struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19977 +       struct caam_hash_state *state = ahash_request_ctx(req);
19978 +       struct caam_request *req_ctx = &state->caam_req;
19979 +       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19980 +       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19981 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19982 +                     GFP_KERNEL : GFP_ATOMIC;
19983 +       u8 *next_buf = alt_buf(state);
19984 +       int *next_buflen = alt_buflen(state);
19985 +       int to_hash;
19986 +       int src_nents, mapped_nents;
19987 +       struct ahash_edesc *edesc;
19988 +       int ret = 0;
19989 +
19990 +       *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
19991 +                                     1);
19992 +       to_hash = req->nbytes - *next_buflen;
19993 +
19994 +       if (to_hash) {
19995 +               struct dpaa2_sg_entry *sg_table;
19996 +
19997 +               src_nents = sg_nents_for_len(req->src,
19998 +                                            req->nbytes - (*next_buflen));
19999 +               if (src_nents < 0) {
20000 +                       dev_err(ctx->dev, "Invalid number of src SG.\n");
20001 +                       return src_nents;
20002 +               }
20003 +
20004 +               if (src_nents) {
20005 +                       mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20006 +                                                 DMA_TO_DEVICE);
20007 +                       if (!mapped_nents) {
20008 +                               dev_err(ctx->dev, "unable to map source for DMA\n");
20009 +                               return -ENOMEM;
20010 +                       }
20011 +               } else {
20012 +                       mapped_nents = 0;
20013 +               }
20014 +
20015 +               /* allocate space for base edesc and link tables */
20016 +               edesc = qi_cache_zalloc(GFP_DMA | flags);
20017 +               if (!edesc) {
20018 +                       dma_unmap_sg(ctx->dev, req->src, src_nents,
20019 +                                    DMA_TO_DEVICE);
20020 +                       return -ENOMEM;
20021 +               }
20022 +
20023 +               edesc->src_nents = src_nents;
20024 +               sg_table = &edesc->sgt[0];
20025 +
20026 +               memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20027 +               dpaa2_fl_set_final(in_fle, true);
20028 +               dpaa2_fl_set_len(in_fle, to_hash);
20029 +
20030 +               if (mapped_nents > 1) {
20031 +                       int qm_sg_bytes;
20032 +
20033 +                       sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
20034 +                       qm_sg_bytes = mapped_nents * sizeof(*sg_table);
20035 +                       edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20036 +                                                         qm_sg_bytes,
20037 +                                                         DMA_TO_DEVICE);
20038 +                       if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20039 +                               dev_err(ctx->dev, "unable to map S/G table\n");
20040 +                               ret = -ENOMEM;
20041 +                               goto unmap_ctx;
20042 +                       }
20043 +                       edesc->qm_sg_bytes = qm_sg_bytes;
20044 +                       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20045 +                       dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20046 +               } else {
20047 +                       dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20048 +                       dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
20049 +               }
20050 +
20051 +               if (*next_buflen)
20052 +                       scatterwalk_map_and_copy(next_buf, req->src, to_hash,
20053 +                                                *next_buflen, 0);
20054 +
20055 +               state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
20056 +                                               ctx->ctx_len, DMA_FROM_DEVICE);
20057 +               if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
20058 +                       dev_err(ctx->dev, "unable to map ctx\n");
20059 +                       state->ctx_dma = 0;
20060 +                       ret = -ENOMEM;
20061 +                       goto unmap_ctx;
20062 +               }
20063 +
20064 +               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20065 +               dpaa2_fl_set_addr(out_fle, state->ctx_dma);
20066 +               dpaa2_fl_set_len(out_fle, ctx->ctx_len);
20067 +
20068 +               req_ctx->flc = &ctx->flc[UPDATE_FIRST];
20069 +               req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
20070 +               req_ctx->cbk = ahash_done_ctx_dst;
20071 +               req_ctx->ctx = &req->base;
20072 +               req_ctx->edesc = edesc;
20073 +
20074 +               ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20075 +               if (ret != -EINPROGRESS &&
20076 +                   !(ret == -EBUSY && req->base.flags &
20077 +                     CRYPTO_TFM_REQ_MAY_BACKLOG))
20078 +                       goto unmap_ctx;
20079 +
20080 +               state->update = ahash_update_ctx;
20081 +               state->finup = ahash_finup_ctx;
20082 +               state->final = ahash_final_ctx;
20083 +       } else if (*next_buflen) {
20084 +               state->update = ahash_update_no_ctx;
20085 +               state->finup = ahash_finup_no_ctx;
20086 +               state->final = ahash_final_no_ctx;
20087 +               scatterwalk_map_and_copy(next_buf, req->src, 0,
20088 +                                        req->nbytes, 0);
20089 +               switch_buf(state);
20090 +       }
20091 +#ifdef DEBUG
20092 +       print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
20093 +                      DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
20094 +#endif
20095 +
20096 +       return ret;
20097 +unmap_ctx:
20098 +       ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
20099 +       qi_cache_free(edesc);
20100 +       return ret;
20101 +}
20102 +
20103 +static int ahash_finup_first(struct ahash_request *req)
20104 +{
20105 +       return ahash_digest(req);
20106 +}
20107 +
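+/*
+ * ahash_init() - reset the per-request state machine. update/finup/
+ * final start out pointing at the "first"/"no ctx" handlers and are
+ * advanced by those handlers once a running context becomes available;
+ * ahash_update()/ahash_finup()/ahash_final() below just dispatch
+ * through these pointers.
+ */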
20108 +static int ahash_init(struct ahash_request *req)
20109 +{
20110 +       struct caam_hash_state *state = ahash_request_ctx(req);
20111 +
20112 +       state->update = ahash_update_first;
20113 +       state->finup = ahash_finup_first;
20114 +       state->final = ahash_final_no_ctx;
20115 +
20116 +       state->ctx_dma = 0;
20117 +       state->current_buf = 0;
20118 +       state->buf_dma = 0;
20119 +       state->buflen_0 = 0;
20120 +       state->buflen_1 = 0;
20121 +
20122 +       return 0;
20123 +}
20124 +
20125 +static int ahash_update(struct ahash_request *req)
20126 +{
20127 +       struct caam_hash_state *state = ahash_request_ctx(req);
20128 +
20129 +       return state->update(req);
20130 +}
20131 +
20132 +static int ahash_finup(struct ahash_request *req)
20133 +{
20134 +       struct caam_hash_state *state = ahash_request_ctx(req);
20135 +
20136 +       return state->finup(req);
20137 +}
20138 +
20139 +static int ahash_final(struct ahash_request *req)
20140 +{
20141 +       struct caam_hash_state *state = ahash_request_ctx(req);
20142 +
20143 +       return state->final(req);
20144 +}
20145 +
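+/*
+ * ahash_export()/ahash_import() - save/restore the software state
+ * (active buffer, running context and state-machine handlers) so a
+ * hash operation can be suspended and resumed on another request.
+ */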
20146 +static int ahash_export(struct ahash_request *req, void *out)
20147 +{
20148 +       struct caam_hash_state *state = ahash_request_ctx(req);
20149 +       struct caam_export_state *export = out;
20150 +       int len;
20151 +       u8 *buf;
20152 +
20153 +       if (state->current_buf) {
20154 +               buf = state->buf_1;
20155 +               len = state->buflen_1;
20156 +       } else {
20157 +               buf = state->buf_0;
20158 +               len = state->buflen_0;
20159 +       }
20160 +
20161 +       memcpy(export->buf, buf, len);
20162 +       memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
20163 +       export->buflen = len;
20164 +       export->update = state->update;
20165 +       export->final = state->final;
20166 +       export->finup = state->finup;
20167 +
20168 +       return 0;
20169 +}
20170 +
20171 +static int ahash_import(struct ahash_request *req, const void *in)
20172 +{
20173 +       struct caam_hash_state *state = ahash_request_ctx(req);
20174 +       const struct caam_export_state *export = in;
20175 +
20176 +       memset(state, 0, sizeof(*state));
20177 +       memcpy(state->buf_0, export->buf, export->buflen);
20178 +       memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
20179 +       state->buflen_0 = export->buflen;
20180 +       state->update = export->update;
20181 +       state->final = export->final;
20182 +       state->finup = export->finup;
20183 +
20184 +       return 0;
20185 +}
20186 +
20187 +struct caam_hash_template {
20188 +       char name[CRYPTO_MAX_ALG_NAME];
20189 +       char driver_name[CRYPTO_MAX_ALG_NAME];
20190 +       char hmac_name[CRYPTO_MAX_ALG_NAME];
20191 +       char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
20192 +       unsigned int blocksize;
20193 +       struct ahash_alg template_ahash;
20194 +       u32 alg_type;
20195 +};
20196 +
20197 +/* ahash descriptors */
20198 +static struct caam_hash_template driver_hash[] = {
20199 +       {
20200 +               .name = "sha1",
20201 +               .driver_name = "sha1-caam-qi2",
20202 +               .hmac_name = "hmac(sha1)",
20203 +               .hmac_driver_name = "hmac-sha1-caam-qi2",
20204 +               .blocksize = SHA1_BLOCK_SIZE,
20205 +               .template_ahash = {
20206 +                       .init = ahash_init,
20207 +                       .update = ahash_update,
20208 +                       .final = ahash_final,
20209 +                       .finup = ahash_finup,
20210 +                       .digest = ahash_digest,
20211 +                       .export = ahash_export,
20212 +                       .import = ahash_import,
20213 +                       .setkey = ahash_setkey,
20214 +                       .halg = {
20215 +                               .digestsize = SHA1_DIGEST_SIZE,
20216 +                               .statesize = sizeof(struct caam_export_state),
20217 +                       },
20218 +               },
20219 +               .alg_type = OP_ALG_ALGSEL_SHA1,
20220 +       }, {
20221 +               .name = "sha224",
20222 +               .driver_name = "sha224-caam-qi2",
20223 +               .hmac_name = "hmac(sha224)",
20224 +               .hmac_driver_name = "hmac-sha224-caam-qi2",
20225 +               .blocksize = SHA224_BLOCK_SIZE,
20226 +               .template_ahash = {
20227 +                       .init = ahash_init,
20228 +                       .update = ahash_update,
20229 +                       .final = ahash_final,
20230 +                       .finup = ahash_finup,
20231 +                       .digest = ahash_digest,
20232 +                       .export = ahash_export,
20233 +                       .import = ahash_import,
20234 +                       .setkey = ahash_setkey,
20235 +                       .halg = {
20236 +                               .digestsize = SHA224_DIGEST_SIZE,
20237 +                               .statesize = sizeof(struct caam_export_state),
20238 +                       },
20239 +               },
20240 +               .alg_type = OP_ALG_ALGSEL_SHA224,
20241 +       }, {
20242 +               .name = "sha256",
20243 +               .driver_name = "sha256-caam-qi2",
20244 +               .hmac_name = "hmac(sha256)",
20245 +               .hmac_driver_name = "hmac-sha256-caam-qi2",
20246 +               .blocksize = SHA256_BLOCK_SIZE,
20247 +               .template_ahash = {
20248 +                       .init = ahash_init,
20249 +                       .update = ahash_update,
20250 +                       .final = ahash_final,
20251 +                       .finup = ahash_finup,
20252 +                       .digest = ahash_digest,
20253 +                       .export = ahash_export,
20254 +                       .import = ahash_import,
20255 +                       .setkey = ahash_setkey,
20256 +                       .halg = {
20257 +                               .digestsize = SHA256_DIGEST_SIZE,
20258 +                               .statesize = sizeof(struct caam_export_state),
20259 +                       },
20260 +               },
20261 +               .alg_type = OP_ALG_ALGSEL_SHA256,
20262 +       }, {
20263 +               .name = "sha384",
20264 +               .driver_name = "sha384-caam-qi2",
20265 +               .hmac_name = "hmac(sha384)",
20266 +               .hmac_driver_name = "hmac-sha384-caam-qi2",
20267 +               .blocksize = SHA384_BLOCK_SIZE,
20268 +               .template_ahash = {
20269 +                       .init = ahash_init,
20270 +                       .update = ahash_update,
20271 +                       .final = ahash_final,
20272 +                       .finup = ahash_finup,
20273 +                       .digest = ahash_digest,
20274 +                       .export = ahash_export,
20275 +                       .import = ahash_import,
20276 +                       .setkey = ahash_setkey,
20277 +                       .halg = {
20278 +                               .digestsize = SHA384_DIGEST_SIZE,
20279 +                               .statesize = sizeof(struct caam_export_state),
20280 +                       },
20281 +               },
20282 +               .alg_type = OP_ALG_ALGSEL_SHA384,
20283 +       }, {
20284 +               .name = "sha512",
20285 +               .driver_name = "sha512-caam-qi2",
20286 +               .hmac_name = "hmac(sha512)",
20287 +               .hmac_driver_name = "hmac-sha512-caam-qi2",
20288 +               .blocksize = SHA512_BLOCK_SIZE,
20289 +               .template_ahash = {
20290 +                       .init = ahash_init,
20291 +                       .update = ahash_update,
20292 +                       .final = ahash_final,
20293 +                       .finup = ahash_finup,
20294 +                       .digest = ahash_digest,
20295 +                       .export = ahash_export,
20296 +                       .import = ahash_import,
20297 +                       .setkey = ahash_setkey,
20298 +                       .halg = {
20299 +                               .digestsize = SHA512_DIGEST_SIZE,
20300 +                               .statesize = sizeof(struct caam_export_state),
20301 +                       },
20302 +               },
20303 +               .alg_type = OP_ALG_ALGSEL_SHA512,
20304 +       }, {
20305 +               .name = "md5",
20306 +               .driver_name = "md5-caam-qi2",
20307 +               .hmac_name = "hmac(md5)",
20308 +               .hmac_driver_name = "hmac-md5-caam-qi2",
20309 +               .blocksize = MD5_BLOCK_WORDS * 4,
20310 +               .template_ahash = {
20311 +                       .init = ahash_init,
20312 +                       .update = ahash_update,
20313 +                       .final = ahash_final,
20314 +                       .finup = ahash_finup,
20315 +                       .digest = ahash_digest,
20316 +                       .export = ahash_export,
20317 +                       .import = ahash_import,
20318 +                       .setkey = ahash_setkey,
20319 +                       .halg = {
20320 +                               .digestsize = MD5_DIGEST_SIZE,
20321 +                               .statesize = sizeof(struct caam_export_state),
20322 +                       },
20323 +               },
20324 +               .alg_type = OP_ALG_ALGSEL_MD5,
20325 +       }
20326 +};
20327 +
20328 +struct caam_hash_alg {
20329 +       struct list_head entry;
20330 +       struct device *dev;
20331 +       int alg_type;
20332 +       struct ahash_alg ahash_alg;
20333 +};
20334 +
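+/*
+ * caam_hash_cra_init() - per-tfm setup: DMA-map the shared flow
+ * contexts once, derive the MDHA running-digest length from the
+ * selected algorithm and build the shared descriptors.
+ */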
20335 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
20336 +{
20337 +       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
20338 +       struct crypto_alg *base = tfm->__crt_alg;
20339 +       struct hash_alg_common *halg =
20340 +                container_of(base, struct hash_alg_common, base);
20341 +       struct ahash_alg *alg =
20342 +                container_of(halg, struct ahash_alg, halg);
20343 +       struct caam_hash_alg *caam_hash =
20344 +                container_of(alg, struct caam_hash_alg, ahash_alg);
20345 +       struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20346 +       /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
20347 +       static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
20348 +                                        HASH_MSG_LEN + SHA1_DIGEST_SIZE,
20349 +                                        HASH_MSG_LEN + 32,
20350 +                                        HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20351 +                                        HASH_MSG_LEN + 64,
20352 +                                        HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20353 +       dma_addr_t dma_addr;
20354 +       int i;
20355 +
20356 +       ctx->dev = caam_hash->dev;
20357 +
20358 +       dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
20359 +                                       DMA_BIDIRECTIONAL,
20360 +                                       DMA_ATTR_SKIP_CPU_SYNC);
20361 +       if (dma_mapping_error(ctx->dev, dma_addr)) {
20362 +               dev_err(ctx->dev, "unable to map shared descriptors\n");
20363 +               return -ENOMEM;
20364 +       }
20365 +
20366 +       for (i = 0; i < HASH_NUM_OP; i++)
20367 +               ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
20368 +
20369 +       /* copy descriptor header template value */
20370 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20371 +
20372 +       ctx->ctx_len = runninglen[(ctx->adata.algtype &
20373 +                                  OP_ALG_ALGSEL_SUBMASK) >>
20374 +                                 OP_ALG_ALGSEL_SHIFT];
20375 +
20376 +       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20377 +                                sizeof(struct caam_hash_state));
20378 +
20379 +       return ahash_set_sh_desc(ahash);
20380 +}
20381 +
20382 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20383 +{
20384 +       struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20385 +
20386 +       dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
20387 +                              DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
20388 +}
20389 +
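+/*
+ * caam_hash_alloc() - instantiate an ahash algorithm from a template,
+ * either as the plain hash or as its keyed (hmac) variant.
+ */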
20390 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
20391 +       struct caam_hash_template *template, bool keyed)
20392 +{
20393 +       struct caam_hash_alg *t_alg;
20394 +       struct ahash_alg *halg;
20395 +       struct crypto_alg *alg;
20396 +
20397 +       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
20398 +       if (!t_alg)
20399 +               return ERR_PTR(-ENOMEM);
20400 +
20401 +       t_alg->ahash_alg = template->template_ahash;
20402 +       halg = &t_alg->ahash_alg;
20403 +       alg = &halg->halg.base;
20404 +
20405 +       if (keyed) {
20406 +               snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20407 +                        template->hmac_name);
20408 +               snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20409 +                        template->hmac_driver_name);
20410 +       } else {
20411 +               snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20412 +                        template->name);
20413 +               snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20414 +                        template->driver_name);
20415 +       }
20416 +       alg->cra_module = THIS_MODULE;
20417 +       alg->cra_init = caam_hash_cra_init;
20418 +       alg->cra_exit = caam_hash_cra_exit;
20419 +       alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
20420 +       alg->cra_priority = CAAM_CRA_PRIORITY;
20421 +       alg->cra_blocksize = template->blocksize;
20422 +       alg->cra_alignmask = 0;
20423 +       alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
20424 +       alg->cra_type = &crypto_ahash_type;
20425 +
20426 +       t_alg->alg_type = template->alg_type;
20427 +       t_alg->dev = dev;
20428 +
20429 +       return t_alg;
20430 +}
20431 +
20432 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
20433 +{
20434 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20435 +
20436 +       ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
20437 +       napi_schedule_irqoff(&ppriv->napi);
20438 +}
20439 +
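+/*
+ * dpaa2_dpseci_dpio_setup() - per-cpu DPIO setup: register a FQDAN
+ * notification callback and allocate a dequeue store for each queue
+ * pair, rolling both back on failure.
+ */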
20440 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
20441 +{
20442 +       struct device *dev = priv->dev;
20443 +       struct dpaa2_io_notification_ctx *nctx;
20444 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20445 +       int err, i = 0, cpu;
20446 +
20447 +       for_each_online_cpu(cpu) {
20448 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20449 +               ppriv->priv = priv;
20450 +               nctx = &ppriv->nctx;
20451 +               nctx->is_cdan = 0;
20452 +               nctx->id = ppriv->rsp_fqid;
20453 +               nctx->desired_cpu = cpu;
20454 +               nctx->cb = dpaa2_caam_fqdan_cb;
20455 +
20456 +               /* Register notification callbacks */
20457 +               err = dpaa2_io_service_register(NULL, nctx);
20458 +               if (unlikely(err)) {
20459 +                       dev_err(dev, "notification register failed\n");
20460 +                       nctx->cb = NULL;
20461 +                       goto err;
20462 +               }
20463 +
20464 +               ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
20465 +                                                    dev);
20466 +               if (unlikely(!ppriv->store)) {
20467 +                       dev_err(dev, "dpaa2_io_store_create() failed\n");
20468 +                       goto err;
20469 +               }
20470 +
20471 +               if (++i == priv->num_pairs)
20472 +                       break;
20473 +       }
20474 +
20475 +       return 0;
20476 +
20477 +err:
20478 +       for_each_online_cpu(cpu) {
20479 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20480 +               if (!ppriv->nctx.cb)
20481 +                       break;
20482 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20483 +       }
20484 +
20485 +       for_each_online_cpu(cpu) {
20486 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20487 +               if (!ppriv->store)
20488 +                       break;
20489 +               dpaa2_io_store_destroy(ppriv->store);
20490 +       }
20491 +
20492 +       return err;
20493 +}
20494 +
20495 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
20496 +{
20497 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20498 +       int i = 0, cpu;
20499 +
20500 +       for_each_online_cpu(cpu) {
20501 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20502 +               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20503 +               dpaa2_io_store_destroy(ppriv->store);
20504 +
20505 +               if (++i == priv->num_pairs)
20506 +                       return;
20507 +       }
20508 +}
20509 +
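+/*
+ * dpaa2_dpseci_bind() - point each Rx frame queue at the DPIO of its
+ * per-cpu context, so responses are dequeued on the cpu that issued
+ * the requests.
+ */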
20510 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
20511 +{
20512 +       struct dpseci_rx_queue_cfg rx_queue_cfg;
20513 +       struct device *dev = priv->dev;
20514 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20515 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20516 +       int err = 0, i = 0, cpu;
20517 +
20518 +       /* Configure Rx queues */
20519 +       for_each_online_cpu(cpu) {
20520 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20521 +
20522 +               rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
20523 +                                      DPSECI_QUEUE_OPT_USER_CTX;
20524 +               rx_queue_cfg.order_preservation_en = 0;
20525 +               rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
20526 +               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
20527 +               /*
20528 +                * Rx priority (WQ) doesn't really matter, since we use
20529 +                * pull mode, i.e. volatile dequeues from specific FQs
20530 +                */
20531 +               rx_queue_cfg.dest_cfg.priority = 0;
20532 +               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
20533 +
20534 +               err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20535 +                                         &rx_queue_cfg);
20536 +               if (err) {
20537 +                       dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
20538 +                               err);
20539 +                       return err;
20540 +               }
20541 +
20542 +               if (++i == priv->num_pairs)
20543 +                       break;
20544 +       }
20545 +
20546 +       return err;
20547 +}
20548 +
20549 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
20550 +{
20551 +       struct device *dev = priv->dev;
20552 +
20553 +       if (!priv->cscn_mem)
20554 +               return;
20555 +
20556 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20557 +       kfree(priv->cscn_mem);
20558 +}
20559 +
20560 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
20561 +{
20562 +       struct device *dev = priv->dev;
20563 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20564 +
20565 +       dpaa2_dpseci_congestion_free(priv);
20566 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20567 +}
20568 +
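+/*
+ * dpaa2_caam_process_fd() - handle one response frame descriptor:
+ * recover the originating caam_request from FD[ADDR], unmap its frame
+ * list and invoke the request callback with the FD status (FRC).
+ */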
20569 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
20570 +                                 const struct dpaa2_fd *fd)
20571 +{
20572 +       struct caam_request *req;
20573 +       u32 fd_err;
20574 +
20575 +       if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
20576 +               dev_err(priv->dev, "Only Frame List FD format is supported!\n");
20577 +               return;
20578 +       }
20579 +
20580 +       fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
20581 +       if (unlikely(fd_err))
20582 +               dev_err(priv->dev, "FD error: %08x\n", fd_err);
20583 +
20584 +       /*
20585 +        * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
20586 +        * in FD[ERR] or FD[FRC].
20587 +        */
20588 +       req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
20589 +       dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
20590 +                        DMA_BIDIRECTIONAL);
20591 +       req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
20592 +}
20593 +
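+/*
+ * dpaa2_caam_pull_fq() - issue a volatile dequeue command for the
+ * response FQ into the per-cpu store, retrying while the software
+ * portal is busy.
+ */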
20594 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
20595 +{
20596 +       int err;
20597 +
20598 +       /* Retry while portal is busy */
20599 +       do {
20600 +               err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
20601 +                                              ppriv->store);
20602 +       } while (err == -EBUSY);
20603 +
20604 +       if (unlikely(err))
20605 +               dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
20606 +
20607 +       return err;
20608 +}
20609 +
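+/*
+ * dpaa2_caam_store_consume() - drain the per-cpu dequeue store,
+ * processing every valid frame until the "last" token is seen;
+ * returns the number of frames cleaned.
+ */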
20610 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
20611 +{
20612 +       struct dpaa2_dq *dq;
20613 +       int cleaned = 0, is_last;
20614 +
20615 +       do {
20616 +               dq = dpaa2_io_store_next(ppriv->store, &is_last);
20617 +               if (unlikely(!dq)) {
20618 +                       if (unlikely(!is_last)) {
20619 +                               dev_dbg(ppriv->priv->dev,
20620 +                                       "FQ %d returned no valid frames\n",
20621 +                                       ppriv->rsp_fqid);
20622 +                               /*
20623 +                                * MUST retry until we get some sort of
20624 +                                * valid response token (be it "empty dequeue"
20625 +                                * or a valid frame).
20626 +                                */
20627 +                               continue;
20628 +                       }
20629 +                       break;
20630 +               }
20631 +
20632 +               /* Process FD */
20633 +               dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
20634 +               cleaned++;
20635 +       } while (!is_last);
20636 +
20637 +       return cleaned;
20638 +}
20639 +
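+/*
+ * dpaa2_dpseci_poll() - NAPI poll: alternate volatile dequeues and
+ * store draining until the budget is nearly consumed or the FQ runs
+ * empty, then complete NAPI and re-arm notifications.
+ */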
20640 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
20641 +{
20642 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20643 +       struct dpaa2_caam_priv *priv;
20644 +       int err, cleaned = 0, store_cleaned;
20645 +
20646 +       ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
20647 +       priv = ppriv->priv;
20648 +
20649 +       if (unlikely(dpaa2_caam_pull_fq(ppriv)))
20650 +               return 0;
20651 +
20652 +       do {
20653 +               store_cleaned = dpaa2_caam_store_consume(ppriv);
20654 +               cleaned += store_cleaned;
20655 +
20656 +               if (store_cleaned == 0 ||
20657 +                   cleaned > budget - DPAA2_CAAM_STORE_SIZE)
20658 +                       break;
20659 +
20660 +               /* Try to dequeue some more */
20661 +               err = dpaa2_caam_pull_fq(ppriv);
20662 +               if (unlikely(err))
20663 +                       break;
20664 +       } while (1);
20665 +
20666 +       if (cleaned < budget) {
20667 +               napi_complete_done(napi, cleaned);
20668 +               err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
20669 +               if (unlikely(err))
20670 +                       dev_err(priv->dev, "Notification rearm failed: %d\n",
20671 +                               err);
20672 +       }
20673 +
20674 +       return cleaned;
20675 +}
20676 +
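+/*
+ * dpaa2_dpseci_congestion_setup() - if the DPSECI object supports
+ * congestion groups, map a CSCN area and ask the MC firmware to
+ * mirror congestion state changes into it.
+ */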
20677 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
20678 +                                        u16 token)
20679 +{
20680 +       struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
20681 +       struct device *dev = priv->dev;
20682 +       int err;
20683 +
20684 +       /*
20685 +        * Congestion group feature supported starting with DPSECI API v5.1
20686 +        * and only when object has been created with this capability.
20687 +        */
20688 +       if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
20689 +           !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
20690 +               return 0;
20691 +
20692 +       priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
20693 +                                GFP_KERNEL | GFP_DMA);
20694 +       if (!priv->cscn_mem)
20695 +               return -ENOMEM;
20696 +
20697 +       priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
20698 +       priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
20699 +                                       DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20700 +       if (dma_mapping_error(dev, priv->cscn_dma)) {
20701 +               dev_err(dev, "Error mapping CSCN memory area\n");
20702 +               err = -ENOMEM;
20703 +               goto err_dma_map;
20704 +       }
20705 +
20706 +       cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
20707 +       cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
20708 +       cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
20709 +       cong_notif_cfg.message_ctx = (u64)priv;
20710 +       cong_notif_cfg.message_iova = priv->cscn_dma;
20711 +       cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
20712 +                                       DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
20713 +                                       DPSECI_CGN_MODE_COHERENT_WRITE;
20714 +
20715 +       err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
20716 +                                                &cong_notif_cfg);
20717 +       if (err) {
20718 +               dev_err(dev, "dpseci_set_congestion_notification failed\n");
20719 +               goto err_set_cong;
20720 +       }
20721 +
20722 +       return 0;
20723 +
20724 +err_set_cong:
20725 +       dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20726 +err_dma_map:
20727 +       kfree(priv->cscn_mem);
20728 +
20729 +       return err;
20730 +}
20731 +
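+/*
+ * dpaa2_dpseci_setup() - open the DPSECI object, read its attributes
+ * and queue configuration, and set up one NAPI instance per online
+ * cpu (capped at the number of Rx/Tx queue pairs).
+ */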
20732 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
20733 +{
20734 +       struct device *dev = &ls_dev->dev;
20735 +       struct dpaa2_caam_priv *priv;
20736 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20737 +       int err, cpu;
20738 +       u8 i;
20739 +
20740 +       priv = dev_get_drvdata(dev);
20741 +
20742 +       priv->dev = dev;
20743 +       priv->dpsec_id = ls_dev->obj_desc.id;
20744 +
20745 +       /* Get a handle for the DPSECI this interface is associated with */
20746 +       err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
20747 +       if (err) {
20748 +               dev_err(dev, "dpsec_open() failed: %d\n", err);
20749 +               goto err_open;
20750 +       }
20751 +
20752 +       dev_info(dev, "Opened dpseci object successfully\n");
20753 +
20754 +       err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
20755 +                                    &priv->minor_ver);
20756 +       if (err) {
20757 +               dev_err(dev, "dpseci_get_api_version() failed\n");
20758 +               goto err_get_vers;
20759 +       }
20760 +
20761 +       err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
20762 +                                   &priv->dpseci_attr);
20763 +       if (err) {
20764 +               dev_err(dev, "dpseci_get_attributes() failed\n");
20765 +               goto err_get_vers;
20766 +       }
20767 +
20768 +       err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
20769 +                                 &priv->sec_attr);
20770 +       if (err) {
20771 +               dev_err(dev, "dpseci_get_sec_attr() failed\n");
20772 +               goto err_get_vers;
20773 +       }
20774 +
20775 +       err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
20776 +       if (err) {
20777 +               dev_err(dev, "setup_congestion() failed\n");
20778 +               goto err_get_vers;
20779 +       }
20780 +
20781 +       priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
20782 +                             priv->dpseci_attr.num_tx_queues);
20783 +       if (priv->num_pairs > num_online_cpus()) {
20784 +               dev_warn(dev, "%d queues won't be used\n",
20785 +                        priv->num_pairs - num_online_cpus());
20786 +               priv->num_pairs = num_online_cpus();
20787 +       }
20788 +
20789 +       for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
20790 +               err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20791 +                                         &priv->rx_queue_attr[i]);
20792 +               if (err) {
20793 +                       dev_err(dev, "dpseci_get_rx_queue() failed\n");
20794 +                       goto err_get_rx_queue;
20795 +               }
20796 +       }
20797 +
20798 +       for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
20799 +               err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20800 +                                         &priv->tx_queue_attr[i]);
20801 +               if (err) {
20802 +                       dev_err(dev, "dpseci_get_tx_queue() failed\n");
20803 +                       goto err_get_rx_queue;
20804 +               }
20805 +       }
20806 +
20807 +       i = 0;
20808 +       for_each_online_cpu(cpu) {
20809 +               dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
20810 +                        priv->rx_queue_attr[i].fqid,
20811 +                        priv->tx_queue_attr[i].fqid);
20812 +
20813 +               ppriv = per_cpu_ptr(priv->ppriv, cpu);
20814 +               ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
20815 +               ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
20816 +               ppriv->prio = i;
20817 +
20818 +               ppriv->net_dev.dev = *dev;
20819 +               INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
20820 +               netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
20821 +                              DPAA2_CAAM_NAPI_WEIGHT);
20822 +               if (++i == priv->num_pairs)
20823 +                       break;
20824 +       }
20825 +
20826 +       return 0;
20827 +
20828 +err_get_rx_queue:
20829 +       dpaa2_dpseci_congestion_free(priv);
20830 +err_get_vers:
20831 +       dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20832 +err_open:
20833 +       return err;
20834 +}
20835 +
20836 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
20837 +{
20838 +       struct device *dev = priv->dev;
20839 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20840 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20841 +       int err, i;
20842 +
20843 +       for (i = 0; i < priv->num_pairs; i++) {
20844 +               ppriv = per_cpu_ptr(priv->ppriv, i);
20845 +               napi_enable(&ppriv->napi);
20846 +       }
20847 +
20848 +       err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
20849 +       if (err) {
20850 +               dev_err(dev, "dpseci_enable() failed\n");
20851 +               return err;
20852 +       }
20853 +
20854 +       dev_info(dev, "DPSECI version %d.%d\n",
20855 +                priv->major_ver,
20856 +                priv->minor_ver);
20857 +
20858 +       return 0;
20859 +}
20860 +
20861 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
20862 +{
20863 +       struct device *dev = priv->dev;
20864 +       struct dpaa2_caam_priv_per_cpu *ppriv;
20865 +       struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20866 +       int i, err = 0, enabled;
20867 +
20868 +       err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
20869 +       if (err) {
20870 +               dev_err(dev, "dpseci_disable() failed\n");
20871 +               return err;
20872 +       }
20873 +
20874 +       err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
20875 +       if (err) {
20876 +               dev_err(dev, "dpseci_is_enabled() failed\n");
20877 +               return err;
20878 +       }
20879 +
20880 +       dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
20881 +
20882 +       for (i = 0; i < priv->num_pairs; i++) {
20883 +               ppriv = per_cpu_ptr(priv->ppriv, i);
20884 +               napi_disable(&ppriv->napi);
20885 +               netif_napi_del(&ppriv->napi);
20886 +       }
20887 +
20888 +       return 0;
20889 +}
20890 +
20891 +static struct list_head alg_list;
20892 +static struct list_head hash_list;
20893 +
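+/*
+ * dpaa2_caam_probe() - bring the device up: allocate the QI cache and
+ * MC portal, initialize DPSECI/DPIO and bind them, then register every
+ * algorithm the SEC hardware accelerators can back.
+ */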
20894 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
20895 +{
20896 +       struct device *dev;
20897 +       struct dpaa2_caam_priv *priv;
20898 +       int i, err = 0;
20899 +       bool registered = false;
20900 +
20901 +       /*
20902 +        * There is no way to get CAAM endianness - there is no direct register
20903 +        * space access and MC f/w does not provide this attribute.
20904 +        * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
20905 +        * property.
20906 +        */
20907 +       caam_little_end = true;
20908 +
20909 +       caam_imx = false;
20910 +
20911 +       dev = &dpseci_dev->dev;
20912 +
20913 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
20914 +       if (!priv)
20915 +               return -ENOMEM;
20916 +
20917 +       dev_set_drvdata(dev, priv);
20918 +
20919 +       priv->domain = iommu_get_domain_for_dev(dev);
20920 +
20921 +       qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
20922 +                                    0, SLAB_CACHE_DMA, NULL);
20923 +       if (!qi_cache) {
20924 +               dev_err(dev, "Can't allocate SEC cache\n");
20925 +               err = -ENOMEM;
20926 +               goto err_qicache;
20927 +       }
20928 +
20929 +       err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
20930 +       if (err) {
20931 +               dev_err(dev, "dma_set_mask_and_coherent() failed\n");
20932 +               goto err_dma_mask;
20933 +       }
20934 +
20935 +       /* Obtain a MC portal */
20936 +       err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
20937 +       if (err) {
20938 +               dev_err(dev, "MC portal allocation failed\n");
20939 +               goto err_dma_mask;
20940 +       }
20941 +
20942 +       priv->ppriv = alloc_percpu(*priv->ppriv);
20943 +       if (!priv->ppriv) {
20944 +               dev_err(dev, "alloc_percpu() failed\n");
20945 +               goto err_alloc_ppriv;
20946 +       }
20947 +
20948 +       /* DPSECI initialization */
20949 +       err = dpaa2_dpseci_setup(dpseci_dev);
20950 +       if (err < 0) {
20951 +               dev_err(dev, "dpaa2_dpseci_setup() failed\n");
20952 +               goto err_dpseci_setup;
20953 +       }
20954 +
20955 +       /* DPIO */
20956 +       err = dpaa2_dpseci_dpio_setup(priv);
20957 +       if (err) {
20958 +               dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
20959 +               goto err_dpio_setup;
20960 +       }
20961 +
20962 +       /* DPSECI binding to DPIO */
20963 +       err = dpaa2_dpseci_bind(priv);
20964 +       if (err) {
20965 +               dev_err(dev, "dpaa2_dpseci_bind() failed\n");
20966 +               goto err_bind;
20967 +       }
20968 +
20969 +       /* DPSECI enable */
20970 +       err = dpaa2_dpseci_enable(priv);
20971 +       if (err) {
20972 +               dev_err(dev, "dpaa2_dpseci_enable() failed\n");
20973 +               goto err_bind;
20974 +       }
20975 +
20976 +       /* register crypto algorithms the device supports */
20977 +       INIT_LIST_HEAD(&alg_list);
20978 +       for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
20979 +               struct caam_crypto_alg *t_alg;
20980 +               struct caam_alg_template *alg = driver_algs + i;
20981 +               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
20982 +
20983 +               /* Skip DES algorithms if not supported by device */
20984 +               if (!priv->sec_attr.des_acc_num &&
20985 +                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
20986 +                    (alg_sel == OP_ALG_ALGSEL_DES)))
20987 +                       continue;
20988 +
20989 +               /* Skip AES algorithms if not supported by device */
20990 +               if (!priv->sec_attr.aes_acc_num &&
20991 +                   (alg_sel == OP_ALG_ALGSEL_AES))
20992 +                       continue;
20993 +
20994 +               t_alg = caam_alg_alloc(alg);
20995 +               if (IS_ERR(t_alg)) {
20996 +                       err = PTR_ERR(t_alg);
20997 +                       dev_warn(dev, "%s alg allocation failed: %d\n",
20998 +                                alg->driver_name, err);
20999 +                       continue;
21000 +               }
21001 +               t_alg->caam.dev = dev;
21002 +
21003 +               err = crypto_register_alg(&t_alg->crypto_alg);
21004 +               if (err) {
21005 +                       dev_warn(dev, "%s alg registration failed: %d\n",
21006 +                                t_alg->crypto_alg.cra_driver_name, err);
21007 +                       kfree(t_alg);
21008 +                       continue;
21009 +               }
21010 +
21011 +               list_add_tail(&t_alg->entry, &alg_list);
21012 +               registered = true;
21013 +       }
21014 +
21015 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21016 +               struct caam_aead_alg *t_alg = driver_aeads + i;
21017 +               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
21018 +                                OP_ALG_ALGSEL_MASK;
21019 +               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
21020 +                                OP_ALG_ALGSEL_MASK;
21021 +
21022 +               /* Skip DES algorithms if not supported by device */
21023 +               if (!priv->sec_attr.des_acc_num &&
21024 +                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
21025 +                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
21026 +                       continue;
21027 +
21028 +               /* Skip AES algorithms if not supported by device */
21029 +               if (!priv->sec_attr.aes_acc_num &&
21030 +                   (c1_alg_sel == OP_ALG_ALGSEL_AES))
21031 +                       continue;
21032 +
21033 +               /*
21034 +                * Skip algorithms requiring message digests
21035 +                * if MD not supported by device.
21036 +                */
21037 +               if (!priv->sec_attr.md_acc_num && c2_alg_sel)
21038 +                       continue;
21039 +
21040 +               t_alg->caam.dev = dev;
21041 +               caam_aead_alg_init(t_alg);
21042 +
21043 +               err = crypto_register_aead(&t_alg->aead);
21044 +               if (err) {
21045 +                       dev_warn(dev, "%s alg registration failed: %d\n",
21046 +                                t_alg->aead.base.cra_driver_name, err);
21047 +                       continue;
21048 +               }
21049 +
21050 +               t_alg->registered = true;
21051 +               registered = true;
21052 +       }
21053 +       if (registered)
21054 +               dev_info(dev, "algorithms registered in /proc/crypto\n");
21055 +
21056 +       /* register hash algorithms the device supports */
21057 +       INIT_LIST_HEAD(&hash_list);
21058 +
21059 +       /*
21060 +        * Skip registration of any hashing algorithms if MD block
21061 +        * is not present.
21062 +        */
21063 +       if (!priv->sec_attr.md_acc_num)
21064 +               return 0;
21065 +
21066 +       for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
21067 +               struct caam_hash_alg *t_alg;
21068 +               struct caam_hash_template *alg = driver_hash + i;
21069 +
21070 +               /* register hmac version */
21071 +               t_alg = caam_hash_alloc(dev, alg, true);
21072 +               if (IS_ERR(t_alg)) {
21073 +                       err = PTR_ERR(t_alg);
21074 +                       dev_warn(dev, "%s hash alg allocation failed: %d\n",
21075 +                                alg->driver_name, err);
21076 +                       continue;
21077 +               }
21078 +
21079 +               err = crypto_register_ahash(&t_alg->ahash_alg);
21080 +               if (err) {
21081 +                       dev_warn(dev, "%s alg registration failed: %d\n",
21082 +                                t_alg->ahash_alg.halg.base.cra_driver_name,
21083 +                                err);
21084 +                       kfree(t_alg);
21085 +               } else {
21086 +                       list_add_tail(&t_alg->entry, &hash_list);
21087 +               }
21088 +
21089 +               /* register unkeyed version */
21090 +               t_alg = caam_hash_alloc(dev, alg, false);
21091 +               if (IS_ERR(t_alg)) {
21092 +                       err = PTR_ERR(t_alg);
21093 +                       dev_warn(dev, "%s alg allocation failed: %d\n",
21094 +                                alg->driver_name, err);
21095 +                       continue;
21096 +               }
21097 +
21098 +               err = crypto_register_ahash(&t_alg->ahash_alg);
21099 +               if (err) {
21100 +                       dev_warn(dev, "%s alg registration failed: %d\n",
21101 +                                t_alg->ahash_alg.halg.base.cra_driver_name,
21102 +                                err);
21103 +                       kfree(t_alg);
21104 +               } else {
21105 +                       list_add_tail(&t_alg->entry, &hash_list);
21106 +               }
21107 +       }
21108 +       if (!list_empty(&hash_list))
21109 +               dev_info(dev, "hash algorithms registered in /proc/crypto\n");
21110 +
21111 +       return err;
21112 +
21113 +err_bind:
21114 +       dpaa2_dpseci_dpio_free(priv);
21115 +err_dpio_setup:
21116 +       dpaa2_dpseci_free(priv);
21117 +err_dpseci_setup:
21118 +       free_percpu(priv->ppriv);
21119 +err_alloc_ppriv:
21120 +       fsl_mc_portal_free(priv->mc_io);
21121 +err_dma_mask:
21122 +       kmem_cache_destroy(qi_cache);
21123 +err_qicache:
21124 +       dev_set_drvdata(dev, NULL);
21125 +
21126 +       return err;
21127 +}
21128 +
21129 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
21130 +{
21131 +       struct device *dev;
21132 +       struct dpaa2_caam_priv *priv;
21133 +       int i;
21134 +
21135 +       dev = &ls_dev->dev;
21136 +       priv = dev_get_drvdata(dev);
21137 +
21138 +       for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21139 +               struct caam_aead_alg *t_alg = driver_aeads + i;
21140 +
21141 +               if (t_alg->registered)
21142 +                       crypto_unregister_aead(&t_alg->aead);
21143 +       }
21144 +
21145 +       if (alg_list.next) {
21146 +               struct caam_crypto_alg *t_alg, *n;
21147 +
21148 +               list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
21149 +                       crypto_unregister_alg(&t_alg->crypto_alg);
21150 +                       list_del(&t_alg->entry);
21151 +                       kfree(t_alg);
21152 +               }
21153 +       }
21154 +
21155 +       if (hash_list.next) {
21156 +               struct caam_hash_alg *t_hash_alg, *p;
21157 +
21158 +               list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
21159 +                       crypto_unregister_ahash(&t_hash_alg->ahash_alg);
21160 +                       list_del(&t_hash_alg->entry);
21161 +                       kfree(t_hash_alg);
21162 +               }
21163 +       }
21164 +
21165 +       dpaa2_dpseci_disable(priv);
21166 +       dpaa2_dpseci_dpio_free(priv);
21167 +       dpaa2_dpseci_free(priv);
21168 +       free_percpu(priv->ppriv);
21169 +       fsl_mc_portal_free(priv->mc_io);
21170 +       dev_set_drvdata(dev, NULL);
21171 +       kmem_cache_destroy(qi_cache);
21172 +
21173 +       return 0;
21174 +}
21175 +
21176 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
21177 +{
21178 +       struct dpaa2_fd fd;
21179 +       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
21180 +       int err = 0, i, id;
21181 +
21182 +       if (IS_ERR(req))
21183 +               return PTR_ERR(req);
21184 +
21185 +       if (priv->cscn_mem) {
21186 +               dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
21187 +                                       DPAA2_CSCN_SIZE,
21188 +                                       DMA_FROM_DEVICE);
21189 +               if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
21190 +                       dev_dbg_ratelimited(dev, "Dropping request\n");
21191 +                       return -EBUSY;
21192 +               }
21193 +       }
21194 +
21195 +       dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
21196 +
21197 +       req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
21198 +                                        DMA_BIDIRECTIONAL);
21199 +       if (dma_mapping_error(dev, req->fd_flt_dma)) {
21200 +               dev_err(dev, "DMA mapping error for QI enqueue request\n");
21201 +               goto err_out;
21202 +       }
21203 +
21204 +       memset(&fd, 0, sizeof(fd));
21205 +       dpaa2_fd_set_format(&fd, dpaa2_fd_list);
21206 +       dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
21207 +       dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
21208 +       dpaa2_fd_set_flc(&fd, req->flc_dma);
21209 +
21210 +       /*
21211 +        * There is no guarantee that preemption is disabled here, so
21212 +        * disable it explicitly before reading the CPU id.
21213 +        */
21214 +       preempt_disable();
21215 +       id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
21216 +       for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
21217 +               err = dpaa2_io_service_enqueue_fq(NULL,
21218 +                                                 priv->tx_queue_attr[id].fqid,
21219 +                                                 &fd);
21220 +               if (err != -EBUSY)
21221 +                       break;
21222 +       }
21223 +       preempt_enable();
21224 +
21225 +       if (unlikely(err < 0)) {
21226 +               dev_err(dev, "Error enqueuing frame: %d\n", err);
21227 +               goto err_out;
21228 +       }
21229 +
21230 +       return -EINPROGRESS;
21231 +
21232 +err_out:
21233 +       dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
21234 +                        DMA_BIDIRECTIONAL);
21235 +       return -EIO;
21236 +}
21237 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
21238 +
21239 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
21240 +       {
21241 +               .vendor = FSL_MC_VENDOR_FREESCALE,
21242 +               .obj_type = "dpseci",
21243 +       },
21244 +       { .vendor = 0x0 }
21245 +};
21246 +
21247 +static struct fsl_mc_driver dpaa2_caam_driver = {
21248 +       .driver = {
21249 +               .name           = KBUILD_MODNAME,
21250 +               .owner          = THIS_MODULE,
21251 +       },
21252 +       .probe          = dpaa2_caam_probe,
21253 +       .remove         = dpaa2_caam_remove,
21254 +       .match_id_table = dpaa2_caam_match_id_table
21255 +};
21256 +
21257 +MODULE_LICENSE("Dual BSD/GPL");
21258 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
21259 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
21260 +
21261 +module_fsl_mc_driver(dpaa2_caam_driver);
21262 --- /dev/null
21263 +++ b/drivers/crypto/caam/caamalg_qi2.h
21264 @@ -0,0 +1,281 @@
21265 +/*
21266 + * Copyright 2015-2016 Freescale Semiconductor Inc.
21267 + * Copyright 2017 NXP
21268 + *
21269 + * Redistribution and use in source and binary forms, with or without
21270 + * modification, are permitted provided that the following conditions are met:
21271 + *     * Redistributions of source code must retain the above copyright
21272 + *      notice, this list of conditions and the following disclaimer.
21273 + *     * Redistributions in binary form must reproduce the above copyright
21274 + *      notice, this list of conditions and the following disclaimer in the
21275 + *      documentation and/or other materials provided with the distribution.
21276 + *     * Neither the names of the above-listed copyright holders nor the
21277 + *      names of any contributors may be used to endorse or promote products
21278 + *      derived from this software without specific prior written permission.
21279 + *
21280 + *
21281 + * ALTERNATIVELY, this software may be distributed under the terms of the
21282 + * GNU General Public License ("GPL") as published by the Free Software
21283 + * Foundation, either version 2 of that License or (at your option) any
21284 + * later version.
21285 + *
21286 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21287 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21288 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21289 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21290 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21291 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21292 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21293 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21294 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21295 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21296 + * POSSIBILITY OF SUCH DAMAGE.
21297 + */
21298 +
21299 +#ifndef _CAAMALG_QI2_H_
21300 +#define _CAAMALG_QI2_H_
21301 +
21302 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
21303 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
21304 +#include <linux/threads.h>
21305 +#include "dpseci.h"
21306 +#include "desc_constr.h"
21307 +
21308 +#define DPAA2_CAAM_STORE_SIZE  16
21309 +/* NAPI weight *must* be a multiple of the store size. */
21310 +#define DPAA2_CAAM_NAPI_WEIGHT 64
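A quick arithmetic check of the constraint stated above, using the two values just defined (the alternative weight named below is only an illustration):

/*
 * 64 = 4 * 16: each NAPI poll budget covers a whole number of
 * dpaa2_io_store refills of DPAA2_CAAM_STORE_SIZE frames, so the
 * poll loop never has to stop partway through a store. A weight
 * such as 40 (not a multiple of 16) would break that invariant.
 */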
21311 +
21312 +/* The congestion entry threshold was chosen so that on LS2088 the
21313 + * maximum throughput is sustained for the available memory.
21314 + */
21315 +#define DPAA2_SEC_CONG_ENTRY_THRESH    (128 * 1024 * 1024)
21316 +#define DPAA2_SEC_CONG_EXIT_THRESH     (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
21317 +
21318 +/**
21319 + * dpaa2_caam_priv - driver private data
21320 + * @dpsec_id: DPSECI object unique ID
21321 + * @major_ver: DPSECI major version
21322 + * @minor_ver: DPSECI minor version
21323 + * @dpseci_attr: DPSECI attributes
21324 + * @sec_attr: SEC engine attributes
21325 + * @rx_queue_attr: array of Rx queue attributes
21326 + * @tx_queue_attr: array of Tx queue attributes
21327 + * @cscn_mem: pointer to memory region containing the
21328 + *     dpaa2_cscn struct; its size is larger than
21329 + *     sizeof(struct dpaa2_cscn) to accommodate alignment
21330 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
21331 + *     as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
21332 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
21333 + * @dev: device associated with the DPSECI object
21334 + * @mc_io: pointer to MC portal's I/O object
21335 + * @domain: IOMMU domain
21336 + * @ppriv: per CPU pointers to private data
21337 + */
21338 +struct dpaa2_caam_priv {
21339 +       int dpsec_id;
21340 +
21341 +       u16 major_ver;
21342 +       u16 minor_ver;
21343 +
21344 +       struct dpseci_attr dpseci_attr;
21345 +       struct dpseci_sec_attr sec_attr;
21346 +       struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
21347 +       struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
21348 +       int num_pairs;
21349 +
21350 +       /* congestion */
21351 +       void *cscn_mem;
21352 +       void *cscn_mem_aligned;
21353 +       dma_addr_t cscn_dma;
21354 +
21355 +       struct device *dev;
21356 +       struct fsl_mc_io *mc_io;
21357 +       struct iommu_domain *domain;
21358 +
21359 +       struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
21360 +};
21361 +
21362 +/**
21363 + * dpaa2_caam_priv_per_cpu - per CPU private data
21364 + * @napi: napi structure
21365 + * @net_dev: netdev used by napi
21366 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
21367 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
21368 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
21369 + * @nctx: notification context of response FQ
21370 + * @store: where dequeued frames are stored
21371 + * @priv: backpointer to dpaa2_caam_priv
21372 + */
21373 +struct dpaa2_caam_priv_per_cpu {
21374 +       struct napi_struct napi;
21375 +       struct net_device net_dev;
21376 +       int req_fqid;
21377 +       int rsp_fqid;
21378 +       int prio;
21379 +       struct dpaa2_io_notification_ctx nctx;
21380 +       struct dpaa2_io_store *store;
21381 +       struct dpaa2_caam_priv *priv;
21382 +};
21383 +
21384 +/*
21385 + * The CAAM QI hardware constructs a job descriptor which points
21386 + * to a shared descriptor (whose address CAAM reads from the FQ's
21387 + * context_a). When the job descriptor is executed by deco, the
21388 + * whole job descriptor together with the shared descriptor is
21389 + * loaded into the deco buffer, which is 64 words long (32 bits each).
21390 + *
21391 + * The job descriptor constructed by QI hardware has layout:
21392 + *
21393 + *     HEADER          (1 word)
21394 + *     Shdesc ptr      (1 or 2 words)
21395 + *     SEQ_OUT_PTR     (1 word)
21396 + *     Out ptr         (1 or 2 words)
21397 + *     Out length      (1 word)
21398 + *     SEQ_IN_PTR      (1 word)
21399 + *     In ptr          (1 or 2 words)
21400 + *     In length       (1 word)
21401 + *
21402 + * The shdesc ptr is used to fetch shared descriptor contents
21403 + * into deco buffer.
21404 + *
21405 + * Apart from the shdesc contents, the number of words loaded
21406 + * into the deco buffer is 8 or 11 (depending on pointer size).
21407 + * The remaining words can be used for storing the shared descriptor.
21408 + */
21409 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
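To make the budget behind MAX_SDLEN concrete, here is the arithmetic worked out as a sketch, assuming the values desc_constr.h defines in this patch (CAAM_CMD_SZ = 4 bytes, CAAM_DESC_BYTES_MAX = 64 words * 4 = 256 bytes) and an 8-byte dma_addr_t:

/*
 * DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ
 *                 = 5 * 4 + 3 * 8 = 44 bytes = 11 words
 *
 * MAX_SDLEN = (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ
 *           = (256 - 44) / 4 = 53 words
 *
 * i.e. the 64-word deco buffer minus the 11 words the QI-built job
 * descriptor occupies, matching the layout described above.
 */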
21410 +
21411 +/* Length of a single buffer in the QI driver memory cache */
21412 +#define CAAM_QI_MEMCACHE_SIZE  512
21413 +
21414 +/*
21415 + * aead_edesc - s/w-extended aead descriptor
21416 + * @src_nents: number of segments in input scatterlist
21417 + * @dst_nents: number of segments in output scatterlist
21418 + * @iv_dma: dma address of iv for checking continuity and link table
21419 + * @qm_sg_bytes: length of dma mapped h/w link table
21420 + * @qm_sg_dma: bus physical mapped address of h/w link table
21421 + * @assoclen_dma: bus physical mapped address of req->assoclen
21422 + * @sgt: the h/w link table
21423 + */
21424 +struct aead_edesc {
21425 +       int src_nents;
21426 +       int dst_nents;
21427 +       dma_addr_t iv_dma;
21428 +       int qm_sg_bytes;
21429 +       dma_addr_t qm_sg_dma;
21430 +       dma_addr_t assoclen_dma;
21431 +#define CAAM_QI_MAX_AEAD_SG                                            \
21432 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
21433 +        sizeof(struct dpaa2_sg_entry))
21434 +       struct dpaa2_sg_entry sgt[0];
21435 +};
21436 +
21437 +/*
21438 + * tls_edesc - s/w-extended tls descriptor
21439 + * @src_nents: number of segments in input scatterlist
21440 + * @dst_nents: number of segments in output scatterlist
21441 + * @iv_dma: dma address of iv for checking continuity and link table
21442 + * @qm_sg_bytes: length of dma mapped h/w link table
21443 + * @qm_sg_dma: bus physical mapped address of h/w link table
21444 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
21445 + * @dst: pointer to output scatterlist, useful for unmapping
21446 + * @sgt: the h/w link table
21447 + */
21448 +struct tls_edesc {
21449 +       int src_nents;
21450 +       int dst_nents;
21451 +       dma_addr_t iv_dma;
21452 +       int qm_sg_bytes;
21453 +       dma_addr_t qm_sg_dma;
21454 +       struct scatterlist tmp[2];
21455 +       struct scatterlist *dst;
21456 +       struct dpaa2_sg_entry sgt[0];
21457 +};
21458 +
21459 +/*
21460 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
21461 + * @src_nents: number of segments in input scatterlist
21462 + * @dst_nents: number of segments in output scatterlist
21463 + * @iv_dma: dma address of iv for checking continuity and link table
21464 + * @qm_sg_bytes: length of dma mapped qm_sg space
21465 + * @qm_sg_dma: I/O virtual address of h/w link table
21466 + * @sgt: the h/w link table
21467 + */
21468 +struct ablkcipher_edesc {
21469 +       int src_nents;
21470 +       int dst_nents;
21471 +       dma_addr_t iv_dma;
21472 +       int qm_sg_bytes;
21473 +       dma_addr_t qm_sg_dma;
21474 +#define CAAM_QI_MAX_ABLKCIPHER_SG                                          \
21475 +       ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
21476 +        sizeof(struct dpaa2_sg_entry))
21477 +       struct dpaa2_sg_entry sgt[0];
21478 +};
21479 +
21480 +/*
21481 + * ahash_edesc - s/w-extended ahash descriptor
21482 + * @dst_dma: I/O virtual address of req->result
21483 + * @qm_sg_dma: I/O virtual address of h/w link table
21484 + * @src_nents: number of segments in input scatterlist
21485 + * @qm_sg_bytes: length of dma mapped qm_sg space
21486 + * @sgt: pointer to h/w link table
21487 + */
21488 +struct ahash_edesc {
21489 +       dma_addr_t dst_dma;
21490 +       dma_addr_t qm_sg_dma;
21491 +       int src_nents;
21492 +       int qm_sg_bytes;
21493 +       struct dpaa2_sg_entry sgt[0];
21494 +};
21495 +
21496 +/**
21497 + * caam_flc - Flow Context (FLC)
21498 + * @flc: Flow Context options
21499 + * @sh_desc: Shared Descriptor
21500 + */
21501 +struct caam_flc {
21502 +       u32 flc[16];
21503 +       u32 sh_desc[MAX_SDLEN];
21504 +} ____cacheline_aligned;
21505 +
21506 +enum optype {
21507 +       ENCRYPT = 0,
21508 +       DECRYPT,
21509 +       GIVENCRYPT,
21510 +       NUM_OP
21511 +};
21512 +
21513 +/**
21514 + * caam_request - the request structure an application should fill when
21515 + *                submitting a job to the driver.
21516 + * @fd_flt: Frame list table defining input and output
21517 + *          fd_flt[0] - FLE pointing to output buffer
21518 + *          fd_flt[1] - FLE pointing to input buffer
21519 + * @fd_flt_dma: DMA address for the frame list table
21520 + * @flc: Flow Context
21521 + * @flc_dma: I/O virtual address of Flow Context
21522 + * @op_type: operation type
21523 + * @cbk: Callback function to invoke when job is completed
21524 + * @ctx: arbitrary context attached to the request by the application
21525 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
21526 + */
21527 +struct caam_request {
21528 +       struct dpaa2_fl_entry fd_flt[2];
21529 +       dma_addr_t fd_flt_dma;
21530 +       struct caam_flc *flc;
21531 +       dma_addr_t flc_dma;
21532 +       enum optype op_type;
21533 +       void (*cbk)(void *ctx, u32 err);
21534 +       void *ctx;
21535 +       void *edesc;
21536 +};
21537 +
21538 +/**
21539 + * dpaa2_caam_enqueue() - enqueue a crypto request
21540 + * @dev: device associated with the DPSECI object
21541 + * @req: pointer to caam_request
21542 + */
21543 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
21544 +
21545 +#endif /* _CAAMALG_QI2_H_ */
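For orientation, a minimal, hypothetical sketch of how a consumer of this API might fill a caam_request and submit it through dpaa2_caam_enqueue(). The struct my_job, my_done() and all their fields are invented for illustration; only caam_request, caam_flc, the dpaa2_fl_* helpers and the return-value contract (-EINPROGRESS on acceptance, -EBUSY under congestion) come from the driver itself.

struct my_job {					/* hypothetical */
	struct caam_request caam_req;
	struct caam_flc *flc;			/* prebuilt shared descriptor */
	dma_addr_t flc_dma;			/* already DMA-mapped */
	dma_addr_t src_dma, dst_dma;		/* already DMA-mapped */
	u32 src_len, dst_len;
};

static void my_done(void *ctx, u32 err)		/* hypothetical */
{
	/* err is the CAAM status reported at dequeue; 0 means success */
	pr_debug("job %p finished: 0x%08x\n", ctx, err);
}

static int my_submit(struct device *dev, struct my_job *job)
{
	struct caam_request *req = &job->caam_req;

	/* fd_flt[0] describes the output buffer, fd_flt[1] the input */
	dpaa2_fl_set_addr(&req->fd_flt[0], job->dst_dma);
	dpaa2_fl_set_len(&req->fd_flt[0], job->dst_len);
	dpaa2_fl_set_addr(&req->fd_flt[1], job->src_dma);
	dpaa2_fl_set_len(&req->fd_flt[1], job->src_len);

	req->flc = job->flc;
	req->flc_dma = job->flc_dma;
	req->cbk = my_done;
	req->ctx = job;

	/* -EINPROGRESS: accepted; -EBUSY: congested, caller may retry */
	return dpaa2_caam_enqueue(dev, req);
}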
21546 --- a/drivers/crypto/caam/caamhash.c
21547 +++ b/drivers/crypto/caam/caamhash.c
21548 @@ -62,6 +62,7 @@
21549  #include "error.h"
21550  #include "sg_sw_sec4.h"
21551  #include "key_gen.h"
21552 +#include "caamhash_desc.h"
21553  
21554  #define CAAM_CRA_PRIORITY              3000
21555  
21556 @@ -71,14 +72,6 @@
21557  #define CAAM_MAX_HASH_BLOCK_SIZE       SHA512_BLOCK_SIZE
21558  #define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
21559  
21560 -/* length of descriptors text */
21561 -#define DESC_AHASH_BASE                        (4 * CAAM_CMD_SZ)
21562 -#define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
21563 -#define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21564 -#define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21565 -#define DESC_AHASH_FINUP_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21566 -#define DESC_AHASH_DIGEST_LEN          (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21567 -
21568  #define DESC_HASH_MAX_USED_BYTES       (DESC_AHASH_FINAL_LEN + \
21569                                          CAAM_MAX_HASH_KEY_SIZE)
21570  #define DESC_HASH_MAX_USED_LEN         (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
21571 @@ -103,20 +96,14 @@ struct caam_hash_ctx {
21572         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21573         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21574         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21575 -       u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21576         dma_addr_t sh_desc_update_dma ____cacheline_aligned;
21577         dma_addr_t sh_desc_update_first_dma;
21578         dma_addr_t sh_desc_fin_dma;
21579         dma_addr_t sh_desc_digest_dma;
21580 -       dma_addr_t sh_desc_finup_dma;
21581         struct device *jrdev;
21582 -       u32 alg_type;
21583 -       u32 alg_op;
21584         u8 key[CAAM_MAX_HASH_KEY_SIZE];
21585 -       dma_addr_t key_dma;
21586         int ctx_len;
21587 -       unsigned int split_key_len;
21588 -       unsigned int split_key_pad_len;
21589 +       struct alginfo adata;
21590  };
21591  
21592  /* ahash state */
21593 @@ -143,6 +130,31 @@ struct caam_export_state {
21594         int (*finup)(struct ahash_request *req);
21595  };
21596  
21597 +static inline void switch_buf(struct caam_hash_state *state)
21598 +{
21599 +       state->current_buf ^= 1;
21600 +}
21601 +
21602 +static inline u8 *current_buf(struct caam_hash_state *state)
21603 +{
21604 +       return state->current_buf ? state->buf_1 : state->buf_0;
21605 +}
21606 +
21607 +static inline u8 *alt_buf(struct caam_hash_state *state)
21608 +{
21609 +       return state->current_buf ? state->buf_0 : state->buf_1;
21610 +}
21611 +
21612 +static inline int *current_buflen(struct caam_hash_state *state)
21613 +{
21614 +       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
21615 +}
21616 +
21617 +static inline int *alt_buflen(struct caam_hash_state *state)
21618 +{
21619 +       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
21620 +}
21621 +
21622  /* Common job descriptor seq in/out ptr routines */
21623  
21624  /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
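The switch_buf()/current_buf()/alt_buf() helpers introduced above centralize a ping-pong scheme over the two state buffers: residue bytes are staged in the alternate buffer while a hardware job may still reference the current one, and the roles flip on completion. A standalone sketch of the same pattern, with an invented pp_state standing in for caam_hash_state (which is defined outside this hunk):

struct pp_state {				/* illustrative only */
	u8 buf_0[64], buf_1[64];
	int buflen_0, buflen_1;
	u8 current_buf;				/* 0 or 1 */
};

static void pp_stage_residue(struct pp_state *s, const u8 *in, int len)
{
	/* alt_buf(): the buffer *not* referenced by in-flight work */
	u8 *next = s->current_buf ? s->buf_0 : s->buf_1;
	int *next_len = s->current_buf ? &s->buflen_0 : &s->buflen_1;

	/* stage leftover bytes for the next request */
	memcpy(next, in, len);
	*next_len = len;
}

static void pp_complete(struct pp_state *s)
{
	/* switch_buf(): flip roles once the hardware job finishes */
	s->current_buf ^= 1;
}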
21625 @@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr
21626         return dst_dma;
21627  }
21628  
21629 -/* Map current buffer in state and put it in link table */
21630 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
21631 -                                           struct sec4_sg_entry *sec4_sg,
21632 -                                           u8 *buf, int buflen)
21633 +/* Map current buffer in state (if length > 0) and put it in link table */
21634 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
21635 +                                    struct sec4_sg_entry *sec4_sg,
21636 +                                    struct caam_hash_state *state)
21637  {
21638 -       dma_addr_t buf_dma;
21639 +       int buflen = *current_buflen(state);
21640  
21641 -       buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
21642 -       dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
21643 +       if (!buflen)
21644 +               return 0;
21645  
21646 -       return buf_dma;
21647 -}
21648 +       state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
21649 +                                       DMA_TO_DEVICE);
21650 +       if (dma_mapping_error(jrdev, state->buf_dma)) {
21651 +               dev_err(jrdev, "unable to map buf\n");
21652 +               state->buf_dma = 0;
21653 +               return -ENOMEM;
21654 +       }
21655  
21656 -/*
21657 - * Only put buffer in link table if it contains data, which is possible,
21658 - * since a buffer has previously been used, and needs to be unmapped,
21659 - */
21660 -static inline dma_addr_t
21661 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
21662 -                      u8 *buf, dma_addr_t buf_dma, int buflen,
21663 -                      int last_buflen)
21664 -{
21665 -       if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
21666 -               dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
21667 -       if (buflen)
21668 -               buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
21669 -       else
21670 -               buf_dma = 0;
21671 +       dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
21672  
21673 -       return buf_dma;
21674 +       return 0;
21675  }
21676  
21677  /* Map state->caam_ctx, and add it to link table */
21678 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
21679 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
21680                                      struct caam_hash_state *state, int ctx_len,
21681                                      struct sec4_sg_entry *sec4_sg, u32 flag)
21682  {
21683 @@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32
21684         return 0;
21685  }
21686  
21687 -/* Common shared descriptor commands */
21688 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21689 -{
21690 -       append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
21691 -                         ctx->split_key_len, CLASS_2 |
21692 -                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
21693 -}
21694 -
21695 -/* Append key if it has been set */
21696 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21697 -{
21698 -       u32 *key_jump_cmd;
21699 -
21700 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
21701 -
21702 -       if (ctx->split_key_len) {
21703 -               /* Skip if already shared */
21704 -               key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
21705 -                                          JUMP_COND_SHRD);
21706 -
21707 -               append_key_ahash(desc, ctx);
21708 -
21709 -               set_jump_tgt_here(desc, key_jump_cmd);
21710 -       }
21711 -
21712 -       /* Propagate errors from shared to job descriptor */
21713 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21714 -}
21715 -
21716 -/*
21717 - * For ahash read data from seqin following state->caam_ctx,
21718 - * and write resulting class2 context to seqout, which may be state->caam_ctx
21719 - * or req->result
21720 - */
21721 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
21722 -{
21723 -       /* Calculate remaining bytes to read */
21724 -       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
21725 -
21726 -       /* Read remaining bytes */
21727 -       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
21728 -                            FIFOLD_TYPE_MSG | KEY_VLF);
21729 -
21730 -       /* Store class2 context bytes */
21731 -       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
21732 -                        LDST_SRCDST_BYTE_CONTEXT);
21733 -}
21734 -
21735 -/*
21736 - * For ahash update, final and finup, import context, read and write to seqout
21737 - */
21738 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
21739 -                                        int digestsize,
21740 -                                        struct caam_hash_ctx *ctx)
21741 -{
21742 -       init_sh_desc_key_ahash(desc, ctx);
21743 -
21744 -       /* Import context from software */
21745 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21746 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
21747 -
21748 -       /* Class 2 operation */
21749 -       append_operation(desc, op | state | OP_ALG_ENCRYPT);
21750 -
21751 -       /*
21752 -        * Load from buf and/or src and write to req->result or state->context
21753 -        */
21754 -       ahash_append_load_str(desc, digestsize);
21755 -}
21756 -
21757 -/* For ahash firsts and digest, read and write to seqout */
21758 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
21759 -                                    int digestsize, struct caam_hash_ctx *ctx)
21760 -{
21761 -       init_sh_desc_key_ahash(desc, ctx);
21762 -
21763 -       /* Class 2 operation */
21764 -       append_operation(desc, op | state | OP_ALG_ENCRYPT);
21765 -
21766 -       /*
21767 -        * Load from buf and/or src and write to req->result or state->context
21768 -        */
21769 -       ahash_append_load_str(desc, digestsize);
21770 -}
21771 -
21772  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
21773  {
21774         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21775         int digestsize = crypto_ahash_digestsize(ahash);
21776         struct device *jrdev = ctx->jrdev;
21777 -       u32 have_key = 0;
21778 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
21779         u32 *desc;
21780  
21781 -       if (ctx->split_key_len)
21782 -               have_key = OP_ALG_AAI_HMAC_PRECOMP;
21783 +       ctx->adata.key_virt = ctx->key;
21784  
21785         /* ahash_update shared descriptor */
21786         desc = ctx->sh_desc_update;
21787 -
21788 -       init_sh_desc(desc, HDR_SHARE_SERIAL);
21789 -
21790 -       /* Import context from software */
21791 -       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21792 -                  LDST_CLASS_2_CCB | ctx->ctx_len);
21793 -
21794 -       /* Class 2 operation */
21795 -       append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
21796 -                        OP_ALG_ENCRYPT);
21797 -
21798 -       /* Load data and write to result or context */
21799 -       ahash_append_load_str(desc, ctx->ctx_len);
21800 -
21801 -       ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21802 -                                                DMA_TO_DEVICE);
21803 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
21804 -               dev_err(jrdev, "unable to map shared descriptor\n");
21805 -               return -ENOMEM;
21806 -       }
21807 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
21808 +                         ctx->ctx_len, true, ctrlpriv->era);
21809 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
21810 +                                  desc_bytes(desc), DMA_TO_DEVICE);
21811  #ifdef DEBUG
21812         print_hex_dump(KERN_ERR,
21813                        "ahash update shdesc@"__stringify(__LINE__)": ",
21814 @@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp
21815  
21816         /* ahash_update_first shared descriptor */
21817         desc = ctx->sh_desc_update_first;
21818 -
21819 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
21820 -                         ctx->ctx_len, ctx);
21821 -
21822 -       ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
21823 -                                                      desc_bytes(desc),
21824 -                                                      DMA_TO_DEVICE);
21825 -       if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
21826 -               dev_err(jrdev, "unable to map shared descriptor\n");
21827 -               return -ENOMEM;
21828 -       }
21829 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
21830 +                         ctx->ctx_len, false, ctrlpriv->era);
21831 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
21832 +                                  desc_bytes(desc), DMA_TO_DEVICE);
21833  #ifdef DEBUG
21834         print_hex_dump(KERN_ERR,
21835                        "ahash update first shdesc@"__stringify(__LINE__)": ",
21836 @@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp
21837  
21838         /* ahash_final shared descriptor */
21839         desc = ctx->sh_desc_fin;
21840 -
21841 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21842 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
21843 -
21844 -       ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21845 -                                             DMA_TO_DEVICE);
21846 -       if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
21847 -               dev_err(jrdev, "unable to map shared descriptor\n");
21848 -               return -ENOMEM;
21849 -       }
21850 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
21851 +                         ctx->ctx_len, true, ctrlpriv->era);
21852 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
21853 +                                  desc_bytes(desc), DMA_TO_DEVICE);
21854  #ifdef DEBUG
21855         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
21856                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
21857                        desc_bytes(desc), 1);
21858  #endif
21859  
21860 -       /* ahash_finup shared descriptor */
21861 -       desc = ctx->sh_desc_finup;
21862 -
21863 -       ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21864 -                             OP_ALG_AS_FINALIZE, digestsize, ctx);
21865 -
21866 -       ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21867 -                                               DMA_TO_DEVICE);
21868 -       if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
21869 -               dev_err(jrdev, "unable to map shared descriptor\n");
21870 -               return -ENOMEM;
21871 -       }
21872 -#ifdef DEBUG
21873 -       print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
21874 -                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
21875 -                      desc_bytes(desc), 1);
21876 -#endif
21877 -
21878         /* ahash_digest shared descriptor */
21879         desc = ctx->sh_desc_digest;
21880 -
21881 -       ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
21882 -                         digestsize, ctx);
21883 -
21884 -       ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
21885 -                                                desc_bytes(desc),
21886 -                                                DMA_TO_DEVICE);
21887 -       if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
21888 -               dev_err(jrdev, "unable to map shared descriptor\n");
21889 -               return -ENOMEM;
21890 -       }
21891 +       cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
21892 +                         ctx->ctx_len, false, ctrlpriv->era);
21893 +       dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
21894 +                                  desc_bytes(desc), DMA_TO_DEVICE);
21895  #ifdef DEBUG
21896         print_hex_dump(KERN_ERR,
21897                        "ahash digest shdesc@"__stringify(__LINE__)": ",
21898 @@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp
21899         return 0;
21900  }
21901  
21902 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21903 -                             u32 keylen)
21904 -{
21905 -       return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
21906 -                              ctx->split_key_pad_len, key_in, keylen,
21907 -                              ctx->alg_op);
21908 -}
21909 -
21910  /* Digest hash size if it is too large */
21911  static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21912                            u32 *keylen, u8 *key_out, u32 digestsize)
21913 @@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h
21914         }
21915  
21916         /* Job descriptor to perform unkeyed hash on key_in */
21917 -       append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
21918 +       append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
21919                          OP_ALG_AS_INITFINAL);
21920         append_seq_in_ptr(desc, src_dma, *keylen, 0);
21921         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
21922 @@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h
21923  static int ahash_setkey(struct crypto_ahash *ahash,
21924                         const u8 *key, unsigned int keylen)
21925  {
21926 -       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
21927 -       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
21928         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21929 -       struct device *jrdev = ctx->jrdev;
21930         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
21931         int digestsize = crypto_ahash_digestsize(ahash);
21932 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
21933         int ret;
21934         u8 *hashed_key = NULL;
21935  
21936 @@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah
21937                 key = hashed_key;
21938         }
21939  
21940 -       /* Pick class 2 key length from algorithm submask */
21941 -       ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
21942 -                                     OP_ALG_ALGSEL_SHIFT] * 2;
21943 -       ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
21944 -
21945 -#ifdef DEBUG
21946 -       printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
21947 -              ctx->split_key_len, ctx->split_key_pad_len);
21948 -       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
21949 -                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
21950 -#endif
21951 +       /*
21952 +        * If DKP is supported, use it in the shared descriptor to generate
21953 +        * the split key.
21954 +        */
21955 +       if (ctrlpriv->era >= 6) {
21956 +               ctx->adata.key_inline = true;
21957 +               ctx->adata.keylen = keylen;
21958 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
21959 +                                                     OP_ALG_ALGSEL_MASK);
21960  
21961 -       ret = gen_split_hash_key(ctx, key, keylen);
21962 -       if (ret)
21963 -               goto bad_free_key;
21964 +               if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
21965 +                       goto bad_free_key;
21966  
21967 -       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
21968 -                                     DMA_TO_DEVICE);
21969 -       if (dma_mapping_error(jrdev, ctx->key_dma)) {
21970 -               dev_err(jrdev, "unable to map key i/o memory\n");
21971 -               ret = -ENOMEM;
21972 -               goto error_free_key;
21973 +               memcpy(ctx->key, key, keylen);
21974 +       } else {
21975 +               ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
21976 +                                   keylen, CAAM_MAX_HASH_KEY_SIZE);
21977 +               if (ret)
21978 +                       goto bad_free_key;
21979         }
21980 -#ifdef DEBUG
21981 -       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
21982 -                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
21983 -                      ctx->split_key_pad_len, 1);
21984 -#endif
21985  
21986 -       ret = ahash_set_sh_desc(ahash);
21987 -       if (ret) {
21988 -               dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
21989 -                                DMA_TO_DEVICE);
21990 -       }
21991 - error_free_key:
21992         kfree(hashed_key);
21993 -       return ret;
21994 +       return ahash_set_sh_desc(ahash);
21995   bad_free_key:
21996         kfree(hashed_key);
21997         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
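Condensed into one place, the two key-handling paths the setkey rework above selects between look roughly like this. A sketch only: the function name is invented, and error handling is simplified to -EINVAL where the real code jumps to bad_free_key; split_key_len(), gen_split_key() and the era threshold are as used in the patch.

static int setkey_strategy_sketch(struct caam_hash_ctx *ctx,
				  struct caam_drv_private *ctrlpriv,
				  const u8 *key, unsigned int keylen)
{
	if (ctrlpriv->era >= 6) {
		/*
		 * CAAM era >= 6: the Derived Key Protocol (DKP) lets the
		 * shared descriptor derive the MDHA split key itself, so
		 * only the raw key needs to be cached in the context.
		 */
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad =
			split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK);
		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			return -EINVAL;
		memcpy(ctx->key, key, keylen);
		return 0;
	}

	/* older eras: run a one-off split-key job and cache the result */
	return gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
			     keylen, CAAM_MAX_HASH_KEY_SIZE);
}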
21998 @@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de
21999                         struct ahash_edesc *edesc,
22000                         struct ahash_request *req, int dst_len)
22001  {
22002 +       struct caam_hash_state *state = ahash_request_ctx(req);
22003 +
22004         if (edesc->src_nents)
22005                 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
22006         if (edesc->dst_dma)
22007 @@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de
22008         if (edesc->sec4_sg_bytes)
22009                 dma_unmap_single(dev, edesc->sec4_sg_dma,
22010                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
22011 +
22012 +       if (state->buf_dma) {
22013 +               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
22014 +                                DMA_TO_DEVICE);
22015 +               state->buf_dma = 0;
22016 +       }
22017  }
22018  
22019  static inline void ahash_unmap_ctx(struct device *dev,
22020 @@ -643,8 +490,7 @@ static void ahash_done(struct device *jr
22021         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22022  #endif
22023  
22024 -       edesc = (struct ahash_edesc *)((char *)desc -
22025 -                offsetof(struct ahash_edesc, hw_desc));
22026 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22027         if (err)
22028                 caam_jr_strstatus(jrdev, err);
22029  
22030 @@ -671,19 +517,19 @@ static void ahash_done_bi(struct device
22031         struct ahash_edesc *edesc;
22032         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22033         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22034 -#ifdef DEBUG
22035         struct caam_hash_state *state = ahash_request_ctx(req);
22036 +#ifdef DEBUG
22037         int digestsize = crypto_ahash_digestsize(ahash);
22038  
22039         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22040  #endif
22041  
22042 -       edesc = (struct ahash_edesc *)((char *)desc -
22043 -                offsetof(struct ahash_edesc, hw_desc));
22044 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22045         if (err)
22046                 caam_jr_strstatus(jrdev, err);
22047  
22048         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
22049 +       switch_buf(state);
22050         kfree(edesc);
22051  
22052  #ifdef DEBUG
22053 @@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de
22054         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22055  #endif
22056  
22057 -       edesc = (struct ahash_edesc *)((char *)desc -
22058 -                offsetof(struct ahash_edesc, hw_desc));
22059 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22060         if (err)
22061                 caam_jr_strstatus(jrdev, err);
22062  
22063 @@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de
22064         struct ahash_edesc *edesc;
22065         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22066         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22067 -#ifdef DEBUG
22068         struct caam_hash_state *state = ahash_request_ctx(req);
22069 +#ifdef DEBUG
22070         int digestsize = crypto_ahash_digestsize(ahash);
22071  
22072         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22073  #endif
22074  
22075 -       edesc = (struct ahash_edesc *)((char *)desc -
22076 -                offsetof(struct ahash_edesc, hw_desc));
22077 +       edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22078         if (err)
22079                 caam_jr_strstatus(jrdev, err);
22080  
22081         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
22082 +       switch_buf(state);
22083         kfree(edesc);
22084  
22085  #ifdef DEBUG
22086 @@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash
22087         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22088         struct caam_hash_state *state = ahash_request_ctx(req);
22089         struct device *jrdev = ctx->jrdev;
22090 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22091 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22092 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22093 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22094 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22095 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
22096 -                          &state->buflen_1, last_buflen;
22097 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22098 +                      GFP_KERNEL : GFP_ATOMIC;
22099 +       u8 *buf = current_buf(state);
22100 +       int *buflen = current_buflen(state);
22101 +       u8 *next_buf = alt_buf(state);
22102 +       int *next_buflen = alt_buflen(state), last_buflen;
22103         int in_len = *buflen + req->nbytes, to_hash;
22104         u32 *desc;
22105         int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
22106 @@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash
22107                 edesc->src_nents = src_nents;
22108                 edesc->sec4_sg_bytes = sec4_sg_bytes;
22109  
22110 -               ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22111 +               ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22112                                          edesc->sec4_sg, DMA_BIDIRECTIONAL);
22113                 if (ret)
22114                         goto unmap_ctx;
22115  
22116 -               state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
22117 -                                                       edesc->sec4_sg + 1,
22118 -                                                       buf, state->buf_dma,
22119 -                                                       *buflen, last_buflen);
22120 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22121 +               if (ret)
22122 +                       goto unmap_ctx;
22123  
22124                 if (mapped_nents) {
22125                         sg_to_sec4_sg_last(req->src, mapped_nents,
22126 @@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash
22127                                                          to_hash - *buflen,
22128                                                          *next_buflen, 0);
22129                 } else {
22130 -                       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22131 -                               cpu_to_caam32(SEC4_SG_LEN_FIN);
22132 +                       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
22133 +                                           1);
22134                 }
22135  
22136 -               state->current_buf = !state->current_buf;
22137 -
22138                 desc = edesc->hw_desc;
22139  
22140                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22141 @@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_
22142         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22143         struct caam_hash_state *state = ahash_request_ctx(req);
22144         struct device *jrdev = ctx->jrdev;
22145 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22146 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22147 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22148 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22149 -       int last_buflen = state->current_buf ? state->buflen_0 :
22150 -                         state->buflen_1;
22151 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22152 +                      GFP_KERNEL : GFP_ATOMIC;
22153 +       int buflen = *current_buflen(state);
22154         u32 *desc;
22155         int sec4_sg_bytes, sec4_sg_src_index;
22156         int digestsize = crypto_ahash_digestsize(ahash);
22157 @@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_
22158         desc = edesc->hw_desc;
22159  
22160         edesc->sec4_sg_bytes = sec4_sg_bytes;
22161 -       edesc->src_nents = 0;
22162  
22163 -       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22164 +       ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22165                                  edesc->sec4_sg, DMA_TO_DEVICE);
22166         if (ret)
22167                 goto unmap_ctx;
22168  
22169 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22170 -                                               buf, state->buf_dma, buflen,
22171 -                                               last_buflen);
22172 -       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22173 -               cpu_to_caam32(SEC4_SG_LEN_FIN);
22174 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22175 +       if (ret)
22176 +               goto unmap_ctx;
22177 +
22178 +       sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
22179  
22180         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22181                                             sec4_sg_bytes, DMA_TO_DEVICE);
22182 @@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_
22183         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22184         struct caam_hash_state *state = ahash_request_ctx(req);
22185         struct device *jrdev = ctx->jrdev;
22186 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22187 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22188 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22189 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22190 -       int last_buflen = state->current_buf ? state->buflen_0 :
22191 -                         state->buflen_1;
22192 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22193 +                      GFP_KERNEL : GFP_ATOMIC;
22194 +       int buflen = *current_buflen(state);
22195         u32 *desc;
22196         int sec4_sg_src_index;
22197         int src_nents, mapped_nents;
22198 @@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_
22199  
22200         /* allocate space for base edesc and hw desc commands, link tables */
22201         edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
22202 -                                 ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
22203 +                                 ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
22204                                   flags);
22205         if (!edesc) {
22206                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
22207 @@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_
22208  
22209         edesc->src_nents = src_nents;
22210  
22211 -       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22212 +       ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22213                                  edesc->sec4_sg, DMA_TO_DEVICE);
22214         if (ret)
22215                 goto unmap_ctx;
22216  
22217 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22218 -                                               buf, state->buf_dma, buflen,
22219 -                                               last_buflen);
22220 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22221 +       if (ret)
22222 +               goto unmap_ctx;
22223  
22224         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
22225                                   sec4_sg_src_index, ctx->ctx_len + buflen,
22226 @@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req
22227  {
22228         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22229         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22230 +       struct caam_hash_state *state = ahash_request_ctx(req);
22231         struct device *jrdev = ctx->jrdev;
22232 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22233 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22234 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22235 +                      GFP_KERNEL : GFP_ATOMIC;
22236         u32 *desc;
22237         int digestsize = crypto_ahash_digestsize(ahash);
22238         int src_nents, mapped_nents;
22239         struct ahash_edesc *edesc;
22240         int ret;
22241  
22242 +       state->buf_dma = 0;
22243 +
22244         src_nents = sg_nents_for_len(req->src, req->nbytes);
22245         if (src_nents < 0) {
22246                 dev_err(jrdev, "Invalid number of src SG.\n");
22247 @@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha
22248         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22249         struct caam_hash_state *state = ahash_request_ctx(req);
22250         struct device *jrdev = ctx->jrdev;
22251 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22252 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22253 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22254 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22255 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22256 +                      GFP_KERNEL : GFP_ATOMIC;
22257 +       u8 *buf = current_buf(state);
22258 +       int buflen = *current_buflen(state);
22259         u32 *desc;
22260         int digestsize = crypto_ahash_digestsize(ahash);
22261         struct ahash_edesc *edesc;
22262 @@ -1246,7 +1083,6 @@ static int ahash_final_no_ctx(struct aha
22263                 dev_err(jrdev, "unable to map dst\n");
22264                 goto unmap;
22265         }
22266 -       edesc->src_nents = 0;
22267  
22268  #ifdef DEBUG
22269         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
22270 @@ -1276,13 +1112,12 @@ static int ahash_update_no_ctx(struct ah
22271         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22272         struct caam_hash_state *state = ahash_request_ctx(req);
22273         struct device *jrdev = ctx->jrdev;
22274 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22275 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22276 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22277 -       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22278 -       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22279 -       int *next_buflen = state->current_buf ? &state->buflen_0 :
22280 -                          &state->buflen_1;
22281 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22282 +                      GFP_KERNEL : GFP_ATOMIC;
22283 +       u8 *buf = current_buf(state);
22284 +       int *buflen = current_buflen(state);
22285 +       u8 *next_buf = alt_buf(state);
22286 +       int *next_buflen = alt_buflen(state);
22287         int in_len = *buflen + req->nbytes, to_hash;
22288         int sec4_sg_bytes, src_nents, mapped_nents;
22289         struct ahash_edesc *edesc;
22290 @@ -1329,10 +1164,11 @@ static int ahash_update_no_ctx(struct ah
22291  
22292                 edesc->src_nents = src_nents;
22293                 edesc->sec4_sg_bytes = sec4_sg_bytes;
22294 -               edesc->dst_dma = 0;
22295  
22296 -               state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
22297 -                                                   buf, *buflen);
22298 +               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22299 +               if (ret)
22300 +                       goto unmap_ctx;
22301 +
22302                 sg_to_sec4_sg_last(req->src, mapped_nents,
22303                                    edesc->sec4_sg + 1, 0);
22304  
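
The open-coded "state->current_buf ?" ternaries removed in this function
give way to small accessors over the two ping-pong buffers in struct
caam_hash_state: one buffer holds the bytes pending for this operation,
the other collects the remainder for the next one. A sketch of the
helpers as introduced earlier in this patch:

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* flip the roles of the two buffers */
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}
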
22305 @@ -1342,8 +1178,6 @@ static int ahash_update_no_ctx(struct ah
22306                                                  *next_buflen, 0);
22307                 }
22308  
22309 -               state->current_buf = !state->current_buf;
22310 -
22311                 desc = edesc->hw_desc;
22312  
22313                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22314 @@ -1403,12 +1237,9 @@ static int ahash_finup_no_ctx(struct aha
22315         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22316         struct caam_hash_state *state = ahash_request_ctx(req);
22317         struct device *jrdev = ctx->jrdev;
22318 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22319 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22320 -       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22321 -       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22322 -       int last_buflen = state->current_buf ? state->buflen_0 :
22323 -                         state->buflen_1;
22324 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22325 +                      GFP_KERNEL : GFP_ATOMIC;
22326 +       int buflen = *current_buflen(state);
22327         u32 *desc;
22328         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
22329         int digestsize = crypto_ahash_digestsize(ahash);
22330 @@ -1450,9 +1281,9 @@ static int ahash_finup_no_ctx(struct aha
22331         edesc->src_nents = src_nents;
22332         edesc->sec4_sg_bytes = sec4_sg_bytes;
22333  
22334 -       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
22335 -                                               state->buf_dma, buflen,
22336 -                                               last_buflen);
22337 +       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22338 +       if (ret)
22339 +               goto unmap;
22340  
22341         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
22342                                   req->nbytes);
22343 @@ -1496,11 +1327,10 @@ static int ahash_update_first(struct aha
22344         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22345         struct caam_hash_state *state = ahash_request_ctx(req);
22346         struct device *jrdev = ctx->jrdev;
22347 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22348 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22349 -       u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
22350 -       int *next_buflen = state->current_buf ?
22351 -               &state->buflen_1 : &state->buflen_0;
22352 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22353 +                      GFP_KERNEL : GFP_ATOMIC;
22354 +       u8 *next_buf = alt_buf(state);
22355 +       int *next_buflen = alt_buflen(state);
22356         int to_hash;
22357         u32 *desc;
22358         int src_nents, mapped_nents;
22359 @@ -1545,7 +1375,6 @@ static int ahash_update_first(struct aha
22360                 }
22361  
22362                 edesc->src_nents = src_nents;
22363 -               edesc->dst_dma = 0;
22364  
22365                 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
22366                                           to_hash);
22367 @@ -1582,6 +1411,7 @@ static int ahash_update_first(struct aha
22368                 state->final = ahash_final_no_ctx;
22369                 scatterwalk_map_and_copy(next_buf, req->src, 0,
22370                                          req->nbytes, 0);
22371 +               switch_buf(state);
22372         }
22373  #ifdef DEBUG
22374         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
22375 @@ -1688,7 +1518,6 @@ struct caam_hash_template {
22376         unsigned int blocksize;
22377         struct ahash_alg template_ahash;
22378         u32 alg_type;
22379 -       u32 alg_op;
22380  };
22381  
22382  /* ahash descriptors */
22383 @@ -1714,7 +1543,6 @@ static struct caam_hash_template driver_
22384                         },
22385                 },
22386                 .alg_type = OP_ALG_ALGSEL_SHA1,
22387 -               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
22388         }, {
22389                 .name = "sha224",
22390                 .driver_name = "sha224-caam",
22391 @@ -1736,7 +1564,6 @@ static struct caam_hash_template driver_
22392                         },
22393                 },
22394                 .alg_type = OP_ALG_ALGSEL_SHA224,
22395 -               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
22396         }, {
22397                 .name = "sha256",
22398                 .driver_name = "sha256-caam",
22399 @@ -1758,7 +1585,6 @@ static struct caam_hash_template driver_
22400                         },
22401                 },
22402                 .alg_type = OP_ALG_ALGSEL_SHA256,
22403 -               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
22404         }, {
22405                 .name = "sha384",
22406                 .driver_name = "sha384-caam",
22407 @@ -1780,7 +1606,6 @@ static struct caam_hash_template driver_
22408                         },
22409                 },
22410                 .alg_type = OP_ALG_ALGSEL_SHA384,
22411 -               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
22412         }, {
22413                 .name = "sha512",
22414                 .driver_name = "sha512-caam",
22415 @@ -1802,7 +1627,6 @@ static struct caam_hash_template driver_
22416                         },
22417                 },
22418                 .alg_type = OP_ALG_ALGSEL_SHA512,
22419 -               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
22420         }, {
22421                 .name = "md5",
22422                 .driver_name = "md5-caam",
22423 @@ -1824,14 +1648,12 @@ static struct caam_hash_template driver_
22424                         },
22425                 },
22426                 .alg_type = OP_ALG_ALGSEL_MD5,
22427 -               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
22428         },
22429  };
22430  
22431  struct caam_hash_alg {
22432         struct list_head entry;
22433         int alg_type;
22434 -       int alg_op;
22435         struct ahash_alg ahash_alg;
22436  };
22437  
22438 @@ -1853,6 +1675,7 @@ static int caam_hash_cra_init(struct cry
22439                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
22440                                          HASH_MSG_LEN + 64,
22441                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
22442 +       dma_addr_t dma_addr;
22443  
22444         /*
22445          * Get a Job ring from Job Ring driver to ensure in-order
22446 @@ -1863,11 +1686,31 @@ static int caam_hash_cra_init(struct cry
22447                 pr_err("Job Ring Device allocation for transform failed\n");
22448                 return PTR_ERR(ctx->jrdev);
22449         }
22450 +
22451 +       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
22452 +                                       offsetof(struct caam_hash_ctx,
22453 +                                                sh_desc_update_dma),
22454 +                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22455 +       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
22456 +               dev_err(ctx->jrdev, "unable to map shared descriptors\n");
22457 +               caam_jr_free(ctx->jrdev);
22458 +               return -ENOMEM;
22459 +       }
22460 +
22461 +       ctx->sh_desc_update_dma = dma_addr;
22462 +       ctx->sh_desc_update_first_dma = dma_addr +
22463 +                                       offsetof(struct caam_hash_ctx,
22464 +                                                sh_desc_update_first);
22465 +       ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
22466 +                                                  sh_desc_fin);
22467 +       ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
22468 +                                                     sh_desc_digest);
22469 +
22470         /* copy descriptor header template value */
22471 -       ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22472 -       ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
22473 +       ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22474  
22475 -       ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
22476 +       ctx->ctx_len = runninglen[(ctx->adata.algtype &
22477 +                                  OP_ALG_ALGSEL_SUBMASK) >>
22478                                   OP_ALG_ALGSEL_SHIFT];
22479  
22480         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
22481 @@ -1879,30 +1722,10 @@ static void caam_hash_cra_exit(struct cr
22482  {
22483         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
22484  
22485 -       if (ctx->sh_desc_update_dma &&
22486 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
22487 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
22488 -                                desc_bytes(ctx->sh_desc_update),
22489 -                                DMA_TO_DEVICE);
22490 -       if (ctx->sh_desc_update_first_dma &&
22491 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
22492 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
22493 -                                desc_bytes(ctx->sh_desc_update_first),
22494 -                                DMA_TO_DEVICE);
22495 -       if (ctx->sh_desc_fin_dma &&
22496 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
22497 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
22498 -                                desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
22499 -       if (ctx->sh_desc_digest_dma &&
22500 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
22501 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
22502 -                                desc_bytes(ctx->sh_desc_digest),
22503 -                                DMA_TO_DEVICE);
22504 -       if (ctx->sh_desc_finup_dma &&
22505 -           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
22506 -               dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
22507 -                                desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
22508 -
22509 +       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
22510 +                              offsetof(struct caam_hash_ctx,
22511 +                                       sh_desc_update_dma),
22512 +                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22513         caam_jr_free(ctx->jrdev);
22514  }
22515  
22516 @@ -1961,7 +1784,6 @@ caam_hash_alloc(struct caam_hash_templat
22517         alg->cra_type = &crypto_ahash_type;
22518  
22519         t_alg->alg_type = template->alg_type;
22520 -       t_alg->alg_op = template->alg_op;
22521  
22522         return t_alg;
22523  }
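
caam_hash_cra_init()/caam_hash_cra_exit() above replace four separate
dma_map_single() calls with one mapping that covers every shared
descriptor in the context. This relies on the descriptors being laid out
back to back at the start of struct caam_hash_ctx, immediately ahead of
their DMA handles, so that offsetof(..., sh_desc_update_dma) bounds the
mapped region and each handle is the base address plus the member's
offset. A sketch of the assumed layout (the real definition is in
caamhash.c):

struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	/* ... jrdev, adata, key and the remaining members ... */
};

Because the region is mapped with DMA_ATTR_SKIP_CPU_SYNC, ownership is
handed to the device explicitly, via dma_sync_single_for_device(), each
time a descriptor is rewritten.
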
22524 --- /dev/null
22525 +++ b/drivers/crypto/caam/caamhash_desc.c
22526 @@ -0,0 +1,108 @@
22527 +/*
22528 + * Shared descriptors for ahash algorithms
22529 + *
22530 + * Copyright 2017 NXP
22531 + *
22532 + * Redistribution and use in source and binary forms, with or without
22533 + * modification, are permitted provided that the following conditions are met:
22534 + *     * Redistributions of source code must retain the above copyright
22535 + *      notice, this list of conditions and the following disclaimer.
22536 + *     * Redistributions in binary form must reproduce the above copyright
22537 + *      notice, this list of conditions and the following disclaimer in the
22538 + *      documentation and/or other materials provided with the distribution.
22539 + *     * Neither the names of the above-listed copyright holders nor the
22540 + *      names of any contributors may be used to endorse or promote products
22541 + *      derived from this software without specific prior written permission.
22542 + *
22543 + *
22544 + * ALTERNATIVELY, this software may be distributed under the terms of the
22545 + * GNU General Public License ("GPL") as published by the Free Software
22546 + * Foundation, either version 2 of that License or (at your option) any
22547 + * later version.
22548 + *
22549 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22550 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22551 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22552 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22553 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22554 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22555 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22556 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22557 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22558 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22559 + * POSSIBILITY OF SUCH DAMAGE.
22560 + */
22561 +
22562 +#include "compat.h"
22563 +#include "desc_constr.h"
22564 +#include "caamhash_desc.h"
22565 +
22566 +/**
22567 + * cnstr_shdsc_ahash - ahash shared descriptor
22568 + * @desc: pointer to buffer used for descriptor construction
22569 + * @adata: pointer to authentication transform definitions.
22570 + *         A split key is required for SEC Era < 6; the size of the split key
22571 + *         is specified in this case.
22572 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
22573 + *         SHA256, SHA384, SHA512}.
22574 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
22575 + * @digestsize: algorithm's digest size
22576 + * @ctx_len: size of Context Register
22577 + * @import_ctx: true if previous Context Register needs to be restored;
22578 + *              must be true for ahash update and final,
22579 + *              must be false for ahash first and digest
22580 + * @era: SEC Era
22581 + */
22582 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22583 +                      int digestsize, int ctx_len, bool import_ctx, int era)
22584 +{
22585 +       u32 op = adata->algtype;
22586 +
22587 +       init_sh_desc(desc, HDR_SHARE_SERIAL);
22588 +
22589 +       /* Append key if it has been set; ahash update excluded */
22590 +       if (state != OP_ALG_AS_UPDATE && adata->keylen) {
22591 +               u32 *skip_key_load;
22592 +
22593 +               /* Skip key loading if already shared */
22594 +               skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
22595 +                                           JUMP_COND_SHRD);
22596 +
22597 +               if (era < 6)
22598 +                       append_key_as_imm(desc, adata->key_virt,
22599 +                                         adata->keylen_pad,
22600 +                                         adata->keylen, CLASS_2 |
22601 +                                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
22602 +               else
22603 +                       append_proto_dkp(desc, adata);
22604 +
22605 +               set_jump_tgt_here(desc, skip_key_load);
22606 +
22607 +               op |= OP_ALG_AAI_HMAC_PRECOMP;
22608 +       }
22609 +
22610 +       /* If needed, import context from software */
22611 +       if (import_ctx)
22612 +               append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
22613 +                               LDST_SRCDST_BYTE_CONTEXT);
22614 +
22615 +       /* Class 2 operation */
22616 +       append_operation(desc, op | state | OP_ALG_ENCRYPT);
22617 +
22618 +       /*
22619 +        * Load from buf and/or src and write to req->result or state->context
22620 +        * Calculate remaining bytes to read
22621 +        */
22622 +       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
22623 +       /* Read remaining bytes */
22624 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
22625 +                            FIFOLD_TYPE_MSG | KEY_VLF);
22626 +       /* Store class2 context bytes */
22627 +       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
22628 +                        LDST_SRCDST_BYTE_CONTEXT);
22629 +}
22630 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
22631 +
22632 +MODULE_LICENSE("Dual BSD/GPL");
22633 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
22634 +MODULE_AUTHOR("NXP Semiconductors");
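
A sketch of the expected caller, modelled on the ahash_set_sh_desc()
rework elsewhere in this patch: one constructor call per descriptor
variant, with import_ctx chosen per the kernel-doc above, followed by a
device-direction sync because the descriptor memory is mapped with
DMA_ATTR_SKIP_CPU_SYNC (the era plumbing is elided here):

static int ahash_set_sh_desc(struct crypto_ahash *ahash, int era)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;

	/* update: import the running context, export it again */
	cnstr_shdsc_ahash(ctx->sh_desc_update, &ctx->adata, OP_ALG_AS_UPDATE,
			  ctx->ctx_len, ctx->ctx_len, true, era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(ctx->sh_desc_update),
				   DMA_TO_DEVICE);

	/* digest: no context to import, write out the final digest */
	cnstr_shdsc_ahash(ctx->sh_desc_digest, &ctx->adata,
			  OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len,
			  false, era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(ctx->sh_desc_digest),
				   DMA_TO_DEVICE);

	/* sh_desc_update_first and sh_desc_fin follow the same pattern */
	return 0;
}
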
22635 --- /dev/null
22636 +++ b/drivers/crypto/caam/caamhash_desc.h
22637 @@ -0,0 +1,49 @@
22638 +/*
22639 + * Shared descriptors for ahash algorithms
22640 + *
22641 + * Copyright 2017 NXP
22642 + *
22643 + * Redistribution and use in source and binary forms, with or without
22644 + * modification, are permitted provided that the following conditions are met:
22645 + *     * Redistributions of source code must retain the above copyright
22646 + *      notice, this list of conditions and the following disclaimer.
22647 + *     * Redistributions in binary form must reproduce the above copyright
22648 + *      notice, this list of conditions and the following disclaimer in the
22649 + *      documentation and/or other materials provided with the distribution.
22650 + *     * Neither the names of the above-listed copyright holders nor the
22651 + *      names of any contributors may be used to endorse or promote products
22652 + *      derived from this software without specific prior written permission.
22653 + *
22654 + *
22655 + * ALTERNATIVELY, this software may be distributed under the terms of the
22656 + * GNU General Public License ("GPL") as published by the Free Software
22657 + * Foundation, either version 2 of that License or (at your option) any
22658 + * later version.
22659 + *
22660 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22661 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22662 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22663 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22664 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22665 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22666 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22667 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22668 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22669 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22670 + * POSSIBILITY OF SUCH DAMAGE.
22671 + */
22672 +
22673 +#ifndef _CAAMHASH_DESC_H_
22674 +#define _CAAMHASH_DESC_H_
22675 +
22676 +/* length of descriptors text */
22677 +#define DESC_AHASH_BASE                        (3 * CAAM_CMD_SZ)
22678 +#define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
22679 +#define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22680 +#define DESC_AHASH_FINAL_LEN           (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
22681 +#define DESC_AHASH_DIGEST_LEN          (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22682 +
22683 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22684 +                      int digestsize, int ctx_len, bool import_ctx, int era);
22685 +
22686 +#endif /* _CAAMHASH_DESC_H_ */
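
For scale, CAAM_CMD_SZ is one 32-bit command word (sizeof(u32) in
desc_constr.h), so these worst-case command-text budgets are small:

/*
 *   DESC_AHASH_BASE              3       * 4 = 12 bytes
 *   DESC_AHASH_UPDATE_LEN        6       * 4 = 24 bytes
 *   DESC_AHASH_UPDATE_FIRST_LEN  (3 + 4) * 4 = 28 bytes
 *   DESC_AHASH_FINAL_LEN         (3 + 5) * 4 = 32 bytes
 *   DESC_AHASH_DIGEST_LEN        (3 + 4) * 4 = 28 bytes
 *
 * The hash driver sizes its shared-descriptor buffers from the largest
 * of these plus room for an immediate split key.
 */
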
22687 --- a/drivers/crypto/caam/caampkc.c
22688 +++ b/drivers/crypto/caam/caampkc.c
22689 @@ -18,6 +18,10 @@
22690  #define DESC_RSA_PUB_LEN       (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
22691  #define DESC_RSA_PRIV_F1_LEN   (2 * CAAM_CMD_SZ + \
22692                                  sizeof(struct rsa_priv_f1_pdb))
22693 +#define DESC_RSA_PRIV_F2_LEN   (2 * CAAM_CMD_SZ + \
22694 +                                sizeof(struct rsa_priv_f2_pdb))
22695 +#define DESC_RSA_PRIV_F3_LEN   (2 * CAAM_CMD_SZ + \
22696 +                                sizeof(struct rsa_priv_f3_pdb))
22697  
22698  static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
22699                          struct akcipher_request *req)
22700 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
22701         dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22702  }
22703  
22704 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
22705 +                             struct akcipher_request *req)
22706 +{
22707 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22708 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22709 +       struct caam_rsa_key *key = &ctx->key;
22710 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22711 +       size_t p_sz = key->p_sz;
22712 +       size_t q_sz = key->q_sz;
22713 +
22714 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22715 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22716 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22717 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22718 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22719 +}
22720 +
22721 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
22722 +                             struct akcipher_request *req)
22723 +{
22724 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22725 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22726 +       struct caam_rsa_key *key = &ctx->key;
22727 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22728 +       size_t p_sz = key->p_sz;
22729 +       size_t q_sz = key->q_sz;
22730 +
22731 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22732 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22733 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22734 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22735 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22736 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22737 +       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22738 +}
22739 +
22740  /* RSA Job Completion handler */
22741  static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
22742  {
22743 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
22744         akcipher_request_complete(req, err);
22745  }
22746  
22747 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
22748 +                            void *context)
22749 +{
22750 +       struct akcipher_request *req = context;
22751 +       struct rsa_edesc *edesc;
22752 +
22753 +       if (err)
22754 +               caam_jr_strstatus(dev, err);
22755 +
22756 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22757 +
22758 +       rsa_priv_f2_unmap(dev, edesc, req);
22759 +       rsa_io_unmap(dev, edesc, req);
22760 +       kfree(edesc);
22761 +
22762 +       akcipher_request_complete(req, err);
22763 +}
22764 +
22765 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
22766 +                            void *context)
22767 +{
22768 +       struct akcipher_request *req = context;
22769 +       struct rsa_edesc *edesc;
22770 +
22771 +       if (err)
22772 +               caam_jr_strstatus(dev, err);
22773 +
22774 +       edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22775 +
22776 +       rsa_priv_f3_unmap(dev, edesc, req);
22777 +       rsa_io_unmap(dev, edesc, req);
22778 +       kfree(edesc);
22779 +
22780 +       akcipher_request_complete(req, err);
22781 +}
22782 +
22783  static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
22784                                          size_t desclen)
22785  {
22786 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
22787         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22788         struct device *dev = ctx->dev;
22789         struct rsa_edesc *edesc;
22790 -       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22791 -                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22792 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22793 +                      GFP_KERNEL : GFP_ATOMIC;
22794         int sgc;
22795         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
22796         int src_nents, dst_nents;
22797 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
22798         return 0;
22799  }
22800  
22801 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
22802 +                              struct rsa_edesc *edesc)
22803 +{
22804 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22805 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22806 +       struct caam_rsa_key *key = &ctx->key;
22807 +       struct device *dev = ctx->dev;
22808 +       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22809 +       int sec4_sg_index = 0;
22810 +       size_t p_sz = key->p_sz;
22811 +       size_t q_sz = key->q_sz;
22812 +
22813 +       pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
22814 +       if (dma_mapping_error(dev, pdb->d_dma)) {
22815 +               dev_err(dev, "Unable to map RSA private exponent memory\n");
22816 +               return -ENOMEM;
22817 +       }
22818 +
22819 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22820 +       if (dma_mapping_error(dev, pdb->p_dma)) {
22821 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
22822 +               goto unmap_d;
22823 +       }
22824 +
22825 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22826 +       if (dma_mapping_error(dev, pdb->q_dma)) {
22827 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
22828 +               goto unmap_p;
22829 +       }
22830 +
22831 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22832 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22833 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
22834 +               goto unmap_q;
22835 +       }
22836 +
22837 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22838 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22839 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
22840 +               goto unmap_tmp1;
22841 +       }
22842 +
22843 +       if (edesc->src_nents > 1) {
22844 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22845 +               pdb->g_dma = edesc->sec4_sg_dma;
22846 +               sec4_sg_index += edesc->src_nents;
22847 +       } else {
22848 +               pdb->g_dma = sg_dma_address(req->src);
22849 +       }
22850 +
22851 +       if (edesc->dst_nents > 1) {
22852 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22853 +               pdb->f_dma = edesc->sec4_sg_dma +
22854 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
22855 +       } else {
22856 +               pdb->f_dma = sg_dma_address(req->dst);
22857 +       }
22858 +
22859 +       pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
22860 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22861 +
22862 +       return 0;
22863 +
22864 +unmap_tmp1:
22865 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22866 +unmap_q:
22867 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22868 +unmap_p:
22869 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22870 +unmap_d:
22871 +       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22872 +
22873 +       return -ENOMEM;
22874 +}
22875 +
22876 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
22877 +                              struct rsa_edesc *edesc)
22878 +{
22879 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22880 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22881 +       struct caam_rsa_key *key = &ctx->key;
22882 +       struct device *dev = ctx->dev;
22883 +       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22884 +       int sec4_sg_index = 0;
22885 +       size_t p_sz = key->p_sz;
22886 +       size_t q_sz = key->q_sz;
22887 +
22888 +       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22889 +       if (dma_mapping_error(dev, pdb->p_dma)) {
22890 +               dev_err(dev, "Unable to map RSA prime factor p memory\n");
22891 +               return -ENOMEM;
22892 +       }
22893 +
22894 +       pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22895 +       if (dma_mapping_error(dev, pdb->q_dma)) {
22896 +               dev_err(dev, "Unable to map RSA prime factor q memory\n");
22897 +               goto unmap_p;
22898 +       }
22899 +
22900 +       pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
22901 +       if (dma_mapping_error(dev, pdb->dp_dma)) {
22902 +               dev_err(dev, "Unable to map RSA exponent dp memory\n");
22903 +               goto unmap_q;
22904 +       }
22905 +
22906 +       pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
22907 +       if (dma_mapping_error(dev, pdb->dq_dma)) {
22908 +               dev_err(dev, "Unable to map RSA exponent dq memory\n");
22909 +               goto unmap_dp;
22910 +       }
22911 +
22912 +       pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
22913 +       if (dma_mapping_error(dev, pdb->c_dma)) {
22914 +               dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
22915 +               goto unmap_dq;
22916 +       }
22917 +
22918 +       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22919 +       if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22920 +               dev_err(dev, "Unable to map RSA tmp1 memory\n");
22921 +               goto unmap_qinv;
22922 +       }
22923 +
22924 +       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22925 +       if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22926 +               dev_err(dev, "Unable to map RSA tmp2 memory\n");
22927 +               goto unmap_tmp1;
22928 +       }
22929 +
22930 +       if (edesc->src_nents > 1) {
22931 +               pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22932 +               pdb->g_dma = edesc->sec4_sg_dma;
22933 +               sec4_sg_index += edesc->src_nents;
22934 +       } else {
22935 +               pdb->g_dma = sg_dma_address(req->src);
22936 +       }
22937 +
22938 +       if (edesc->dst_nents > 1) {
22939 +               pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22940 +               pdb->f_dma = edesc->sec4_sg_dma +
22941 +                            sec4_sg_index * sizeof(struct sec4_sg_entry);
22942 +       } else {
22943 +               pdb->f_dma = sg_dma_address(req->dst);
22944 +       }
22945 +
22946 +       pdb->sgf |= key->n_sz;
22947 +       pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22948 +
22949 +       return 0;
22950 +
22951 +unmap_tmp1:
22952 +       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22953 +unmap_qinv:
22954 +       dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22955 +unmap_dq:
22956 +       dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22957 +unmap_dp:
22958 +       dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22959 +unmap_q:
22960 +       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22961 +unmap_p:
22962 +       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22963 +
22964 +       return -ENOMEM;
22965 +}
22966 +
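
Both PDB setters above finish by packing operand sizes into the sgf and
p_q_len words. A worked example, assuming a 2048-bit key with
equal-sized primes (n_sz = d_sz = 256 bytes, p_sz = q_sz = 128 bytes):

/*
 *   form 2:  pdb->sgf     |= (256 << RSA_PDB_D_SHIFT) | 256;
 *   form 3:  pdb->sgf     |= 256;
 *   both:    pdb->p_q_len  = (128 << RSA_PDB_Q_SHIFT) | 128;
 *
 * RSA_PRIV_PDB_SGF_G / RSA_PRIV_PDB_SGF_F are OR-ed into sgf on top of
 * this whenever the input/output is a scatter/gather table rather than
 * a single flat buffer.
 */
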
22967  static int caam_rsa_enc(struct akcipher_request *req)
22968  {
22969         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22970 @@ -301,24 +543,14 @@ init_fail:
22971         return ret;
22972  }
22973  
22974 -static int caam_rsa_dec(struct akcipher_request *req)
22975 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
22976  {
22977         struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22978         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22979 -       struct caam_rsa_key *key = &ctx->key;
22980         struct device *jrdev = ctx->dev;
22981         struct rsa_edesc *edesc;
22982         int ret;
22983  
22984 -       if (unlikely(!key->n || !key->d))
22985 -               return -EINVAL;
22986 -
22987 -       if (req->dst_len < key->n_sz) {
22988 -               req->dst_len = key->n_sz;
22989 -               dev_err(jrdev, "Output buffer length less than parameter n\n");
22990 -               return -EOVERFLOW;
22991 -       }
22992 -
22993         /* Allocate extended descriptor */
22994         edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
22995         if (IS_ERR(edesc))
22996 @@ -344,17 +576,147 @@ init_fail:
22997         return ret;
22998  }
22999  
23000 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
23001 +{
23002 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23003 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23004 +       struct device *jrdev = ctx->dev;
23005 +       struct rsa_edesc *edesc;
23006 +       int ret;
23007 +
23008 +       /* Allocate extended descriptor */
23009 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
23010 +       if (IS_ERR(edesc))
23011 +               return PTR_ERR(edesc);
23012 +
23013 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
23014 +       ret = set_rsa_priv_f2_pdb(req, edesc);
23015 +       if (ret)
23016 +               goto init_fail;
23017 +
23018 +       /* Initialize Job Descriptor */
23019 +       init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
23020 +
23021 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
23022 +       if (!ret)
23023 +               return -EINPROGRESS;
23024 +
23025 +       rsa_priv_f2_unmap(jrdev, edesc, req);
23026 +
23027 +init_fail:
23028 +       rsa_io_unmap(jrdev, edesc, req);
23029 +       kfree(edesc);
23030 +       return ret;
23031 +}
23032 +
23033 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
23034 +{
23035 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23036 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23037 +       struct device *jrdev = ctx->dev;
23038 +       struct rsa_edesc *edesc;
23039 +       int ret;
23040 +
23041 +       /* Allocate extended descriptor */
23042 +       edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
23043 +       if (IS_ERR(edesc))
23044 +               return PTR_ERR(edesc);
23045 +
23046 +       /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
23047 +       ret = set_rsa_priv_f3_pdb(req, edesc);
23048 +       if (ret)
23049 +               goto init_fail;
23050 +
23051 +       /* Initialize Job Descriptor */
23052 +       init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
23053 +
23054 +       ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
23055 +       if (!ret)
23056 +               return -EINPROGRESS;
23057 +
23058 +       rsa_priv_f3_unmap(jrdev, edesc, req);
23059 +
23060 +init_fail:
23061 +       rsa_io_unmap(jrdev, edesc, req);
23062 +       kfree(edesc);
23063 +       return ret;
23064 +}
23065 +
23066 +static int caam_rsa_dec(struct akcipher_request *req)
23067 +{
23068 +       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23069 +       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23070 +       struct caam_rsa_key *key = &ctx->key;
23071 +       int ret;
23072 +
23073 +       if (unlikely(!key->n || !key->d))
23074 +               return -EINVAL;
23075 +
23076 +       if (req->dst_len < key->n_sz) {
23077 +               req->dst_len = key->n_sz;
23078 +               dev_err(ctx->dev, "Output buffer length less than parameter n\n");
23079 +               return -EOVERFLOW;
23080 +       }
23081 +
23082 +       if (key->priv_form == FORM3)
23083 +               ret = caam_rsa_dec_priv_f3(req);
23084 +       else if (key->priv_form == FORM2)
23085 +               ret = caam_rsa_dec_priv_f2(req);
23086 +       else
23087 +               ret = caam_rsa_dec_priv_f1(req);
23088 +
23089 +       return ret;
23090 +}
23091 +
23092  static void caam_rsa_free_key(struct caam_rsa_key *key)
23093  {
23094         kzfree(key->d);
23095 +       kzfree(key->p);
23096 +       kzfree(key->q);
23097 +       kzfree(key->dp);
23098 +       kzfree(key->dq);
23099 +       kzfree(key->qinv);
23100 +       kzfree(key->tmp1);
23101 +       kzfree(key->tmp2);
23102         kfree(key->e);
23103         kfree(key->n);
23104 -       key->d = NULL;
23105 -       key->e = NULL;
23106 -       key->n = NULL;
23107 -       key->d_sz = 0;
23108 -       key->e_sz = 0;
23109 -       key->n_sz = 0;
23110 +       memset(key, 0, sizeof(*key));
23111 +}
23112 +
23113 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
23114 +{
23115 +       while (!**ptr && *nbytes) {
23116 +               (*ptr)++;
23117 +               (*nbytes)--;
23118 +       }
23119 +}
23120 +
23121 +/**
23122 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
23123 + * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
23124 + * length, since BER encoding requires that the minimum number of bytes be
23125 + * used to encode the integer. The decoded values therefore have to be
23126 + * zero-padded to the appropriate length.
23127 + *
23128 + * @ptr   : pointer to {dP, dQ, qInv} CRT member
23129 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
23130 + * @dstlen: length in bytes of corresponding p or q prime factor
23131 + */
23132 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
23133 +{
23134 +       u8 *dst;
23135 +
23136 +       caam_rsa_drop_leading_zeros(&ptr, &nbytes);
23137 +       if (!nbytes)
23138 +               return NULL;
23139 +
23140 +       dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
23141 +       if (!dst)
23142 +               return NULL;
23143 +
23144 +       memcpy(dst + (dstlen - nbytes), ptr, nbytes);
23145 +
23146 +       return dst;
23147  }
23148  
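
A concrete case of the zero-padding described above: if p is 128 bytes
long but the BER-decoded dP carries only 127 significant bytes,
caam_read_rsa_crt() returns a zeroed 128-byte buffer with the value
copied to offset 1, so the CAAM always sees a full p-sized operand:

	u8 *dp = caam_read_rsa_crt(raw_dp, 127, 128);
	/* dp[0] == 0x00, dp[1..127] hold the 127 significant bytes */
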
23149  /**
23150 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
23151  {
23152         u8 *val;
23153  
23154 -       while (!*buf && *nbytes) {
23155 -               buf++;
23156 -               (*nbytes)--;
23157 -       }
23158 +       caam_rsa_drop_leading_zeros(&buf, nbytes);
23159 +       if (!*nbytes)
23160 +               return NULL;
23161  
23162         val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
23163         if (!val)
23164 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
23165                                 unsigned int keylen)
23166  {
23167         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23168 -       struct rsa_key raw_key = {0};
23169 +       struct rsa_key raw_key = {NULL};
23170         struct caam_rsa_key *rsa_key = &ctx->key;
23171         int ret;
23172  
23173 @@ -437,11 +798,69 @@ err:
23174         return -ENOMEM;
23175  }
23176  
23177 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
23178 +                                      struct rsa_key *raw_key)
23179 +{
23180 +       struct caam_rsa_key *rsa_key = &ctx->key;
23181 +       size_t p_sz = raw_key->p_sz;
23182 +       size_t q_sz = raw_key->q_sz;
23183 +
23184 +       rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
23185 +       if (!rsa_key->p)
23186 +               return;
23187 +       rsa_key->p_sz = p_sz;
23188 +
23189 +       rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
23190 +       if (!rsa_key->q)
23191 +               goto free_p;
23192 +       rsa_key->q_sz = q_sz;
23193 +
23194 +       rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
23195 +       if (!rsa_key->tmp1)
23196 +               goto free_q;
23197 +
23198 +       rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
23199 +       if (!rsa_key->tmp2)
23200 +               goto free_tmp1;
23201 +
23202 +       rsa_key->priv_form = FORM2;
23203 +
23204 +       rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
23205 +       if (!rsa_key->dp)
23206 +               goto free_tmp2;
23207 +
23208 +       rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
23209 +       if (!rsa_key->dq)
23210 +               goto free_dp;
23211 +
23212 +       rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
23213 +                                         q_sz);
23214 +       if (!rsa_key->qinv)
23215 +               goto free_dq;
23216 +
23217 +       rsa_key->priv_form = FORM3;
23218 +
23219 +       return;
23220 +
23221 +free_dq:
23222 +       kzfree(rsa_key->dq);
23223 +free_dp:
23224 +       kzfree(rsa_key->dp);
23225 +free_tmp2:
23226 +       kzfree(rsa_key->tmp2);
23227 +free_tmp1:
23228 +       kzfree(rsa_key->tmp1);
23229 +free_q:
23230 +       kzfree(rsa_key->q);
23231 +free_p:
23232 +       kzfree(rsa_key->p);
23233 +}
23234 +
23235  static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
23236                                  unsigned int keylen)
23237  {
23238         struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23239 -       struct rsa_key raw_key = {0};
23240 +       struct rsa_key raw_key = {NULL};
23241         struct caam_rsa_key *rsa_key = &ctx->key;
23242         int ret;
23243  
23244 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
23245         memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
23246         memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
23247  
23248 +       caam_rsa_set_priv_key_form(ctx, &raw_key);
23249 +
23250         return 0;
23251  
23252  err:
23253 --- a/drivers/crypto/caam/caampkc.h
23254 +++ b/drivers/crypto/caam/caampkc.h
23255 @@ -13,21 +13,75 @@
23256  #include "pdb.h"
23257  
23258  /**
23259 + * caam_priv_key_form - CAAM RSA private key representation
23260 + * CAAM RSA private key may have either of three forms.
23261 + *
23262 + * 1. The first representation consists of the pair (n, d), where the
23263 + *    components have the following meanings:
23264 + *        n      the RSA modulus
23265 + *        d      the RSA private exponent
23266 + *
23267 + * 2. The second representation consists of the triplet (p, q, d), where the
23268 + *    components have the following meanings:
23269 + *        p      the first prime factor of the RSA modulus n
23270 + *        q      the second prime factor of the RSA modulus n
23271 + *        d      the RSA private exponent
23272 + *
23273 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
23274 + *    where the components have the following meanings:
23275 + *        p      the first prime factor of the RSA modulus n
23276 + *        q      the second prime factor of the RSA modulus n
23277 + *        dP     the first factor's CRT exponent
23278 + *        dQ     the second factor's CRT exponent
23279 + *        qInv   the (first) CRT coefficient
23280 + *
23281 + * The benefit of using the third or the second key form is lower computational
23282 + * cost for the decryption and signature operations.
23283 + */
23284 +enum caam_priv_key_form {
23285 +       FORM1,
23286 +       FORM2,
23287 +       FORM3
23288 +};
23289 +
23290 +/**
23291   * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
23292   * @n           : RSA modulus raw byte stream
23293   * @e           : RSA public exponent raw byte stream
23294   * @d           : RSA private exponent raw byte stream
23295 + * @p           : RSA prime factor p of RSA modulus n
23296 + * @q           : RSA prime factor q of RSA modulus n
23297 + * @dp          : RSA CRT exponent of p
23298 + * @dq          : RSA CRT exponent of q
23299 + * @qinv        : RSA CRT coefficient
23300 + * @tmp1        : CAAM uses this temporary buffer as an internal state buffer.
23301 + *                It is assumed to be as long as p.
23302 + * @tmp2        : CAAM uses this temporary buffer as an internal state buffer.
23303 + *                It is assumed to be as long as q.
23304   * @n_sz        : length in bytes of RSA modulus n
23305   * @e_sz        : length in bytes of RSA public exponent
23306   * @d_sz        : length in bytes of RSA private exponent
23307 + * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
23308 + * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
23309 + * @priv_form   : CAAM RSA private key representation
23310   */
23311  struct caam_rsa_key {
23312         u8 *n;
23313         u8 *e;
23314         u8 *d;
23315 +       u8 *p;
23316 +       u8 *q;
23317 +       u8 *dp;
23318 +       u8 *dq;
23319 +       u8 *qinv;
23320 +       u8 *tmp1;
23321 +       u8 *tmp2;
23322         size_t n_sz;
23323         size_t e_sz;
23324         size_t d_sz;
23325 +       size_t p_sz;
23326 +       size_t q_sz;
23327 +       enum caam_priv_key_form priv_form;
23328  };
23329  
23330  /**
23331 @@ -59,6 +113,8 @@ struct rsa_edesc {
23332         union {
23333                 struct rsa_pub_pdb pub;
23334                 struct rsa_priv_f1_pdb priv_f1;
23335 +               struct rsa_priv_f2_pdb priv_f2;
23336 +               struct rsa_priv_f3_pdb priv_f3;
23337         } pdb;
23338         u32 hw_desc[];
23339  };
23340 @@ -66,5 +122,7 @@ struct rsa_edesc {
23341  /* Descriptor construction primitives. */
23342  void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
23343  void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
23344 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
23345 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
23346  
23347  #endif
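
For reference, FORM3 corresponds to the textbook RSA-CRT private-key
operation, which is where the lower cost noted in the
caam_priv_key_form comment comes from:

/*
 * FORM1:  m = c^d mod n                     one full-width modexp
 *
 * FORM3:  m1 = c^dP mod p
 *         m2 = c^dQ mod q
 *         h  = qInv * (m1 - m2) mod p
 *         m  = m2 + q * h                   two half-width modexps,
 *                                           roughly 4x cheaper overall
 */
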
23348 --- a/drivers/crypto/caam/caamrng.c
23349 +++ b/drivers/crypto/caam/caamrng.c
23350 @@ -52,7 +52,7 @@
23351  
23352  /* length of descriptors */
23353  #define DESC_JOB_O_LEN                 (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
23354 -#define DESC_RNG_LEN                   (4 * CAAM_CMD_SZ)
23355 +#define DESC_RNG_LEN                   (3 * CAAM_CMD_SZ)
23356  
23357  /* Buffer, its dma address and lock */
23358  struct buf_data {
23359 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
23360  {
23361         struct buf_data *bd;
23362  
23363 -       bd = (struct buf_data *)((char *)desc -
23364 -             offsetof(struct buf_data, hw_desc));
23365 +       bd = container_of(desc, struct buf_data, hw_desc[0]);
23366  
23367         if (err)
23368                 caam_jr_strstatus(jrdev, err);
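
container_of() is the type-checked spelling of the pointer arithmetic it
replaces: both recover the enclosing buf_data from the address of its
hw_desc member. The two forms are equivalent:

	bd = (struct buf_data *)((char *)desc -
	      offsetof(struct buf_data, hw_desc));
	bd = container_of(desc, struct buf_data, hw_desc[0]);
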
23369 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
23370  
23371         init_sh_desc(desc, HDR_SHARE_SERIAL);
23372  
23373 -       /* Propagate errors from shared to job descriptor */
23374 -       append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
23375 -
23376         /* Generate random bytes */
23377         append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
23378  
23379 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
23380         if (err)
23381                 return err;
23382  
23383 -       err = caam_init_buf(ctx, 1);
23384 -       if (err)
23385 -               return err;
23386 -
23387 -       return 0;
23388 +       return caam_init_buf(ctx, 1);
23389  }
23390  
23391  static struct hwrng caam_rng = {
23392 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
23393                 pr_err("Job Ring Device allocation for transform failed\n");
23394                 return PTR_ERR(dev);
23395         }
23396 -       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
23397 +       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
23398         if (!rng_ctx) {
23399                 err = -ENOMEM;
23400                 goto free_caam_alloc;
23401 --- a/drivers/crypto/caam/compat.h
23402 +++ b/drivers/crypto/caam/compat.h
23403 @@ -16,6 +16,7 @@
23404  #include <linux/of_platform.h>
23405  #include <linux/dma-mapping.h>
23406  #include <linux/io.h>
23407 +#include <linux/iommu.h>
23408  #include <linux/spinlock.h>
23409  #include <linux/rtnetlink.h>
23410  #include <linux/in.h>
23411 --- a/drivers/crypto/caam/ctrl.c
23412 +++ b/drivers/crypto/caam/ctrl.c
23413 @@ -2,40 +2,41 @@
23414   * Controller-level driver, kernel property detection, initialization
23415   *
23416   * Copyright 2008-2012 Freescale Semiconductor, Inc.
23417 + * Copyright 2017 NXP
23418   */
23419  
23420  #include <linux/device.h>
23421  #include <linux/of_address.h>
23422  #include <linux/of_irq.h>
23423 +#include <linux/sys_soc.h>
23424  
23425  #include "compat.h"
23426  #include "regs.h"
23427  #include "intern.h"
23428  #include "jr.h"
23429  #include "desc_constr.h"
23430 -#include "error.h"
23431  #include "ctrl.h"
23432  
23433  bool caam_little_end;
23434  EXPORT_SYMBOL(caam_little_end);
23435 +bool caam_imx;
23436 +EXPORT_SYMBOL(caam_imx);
23437 +bool caam_dpaa2;
23438 +EXPORT_SYMBOL(caam_dpaa2);
23439 +
23440 +#ifdef CONFIG_CAAM_QI
23441 +#include "qi.h"
23442 +#endif
23443  
23444  /*
23445   * i.MX targets tend to have clock control subsystems that can
23446   * enable/disable clocking to our device.
23447   */
23448 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
23449 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
23450 -                                               char *clk_name)
23451 -{
23452 -       return devm_clk_get(dev, clk_name);
23453 -}
23454 -#else
23455  static inline struct clk *caam_drv_identify_clk(struct device *dev,
23456                                                 char *clk_name)
23457  {
23458 -       return NULL;
23459 +       return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
23460  }
23461 -#endif
23462  
23463  /*
23464   * Descriptor to instantiate RNG State Handle 0 in normal mode and
23465 @@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
23466                 /*
23467                  * If the corresponding bit is set, then it means the state
23468                  * handle was initialized by us, and thus it needs to be
23469 -                * deintialized as well
23470 +                * deinitialized as well
23471                  */
23472                 if ((1 << sh_idx) & state_handle_mask) {
23473                         /*
23474 @@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
23475         struct device *ctrldev;
23476         struct caam_drv_private *ctrlpriv;
23477         struct caam_ctrl __iomem *ctrl;
23478 -       int ring;
23479  
23480         ctrldev = &pdev->dev;
23481         ctrlpriv = dev_get_drvdata(ctrldev);
23482         ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
23483  
23484 -       /* Remove platform devices for JobRs */
23485 -       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
23486 -               if (ctrlpriv->jrpdev[ring])
23487 -                       of_device_unregister(ctrlpriv->jrpdev[ring]);
23488 -       }
23489 +       /* Remove platform devices under the crypto node */
23490 +       of_platform_depopulate(ctrldev);
23491 +
23492 +#ifdef CONFIG_CAAM_QI
23493 +       if (ctrlpriv->qidev)
23494 +               caam_qi_shutdown(ctrlpriv->qidev);
23495 +#endif
23496  
23497 -       /* De-initialize RNG state handles initialized by this driver. */
23498 -       if (ctrlpriv->rng4_sh_init)
23499 +       /*
23500 +        * De-initialize RNG state handles initialized by this driver.
23501 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
23502 +        */
23503 +       if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
23504                 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
23505  
23506         /* Shut down debug views */
23507 @@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
23508         clk_disable_unprepare(ctrlpriv->caam_ipg);
23509         clk_disable_unprepare(ctrlpriv->caam_mem);
23510         clk_disable_unprepare(ctrlpriv->caam_aclk);
23511 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23512 -
23513 +       if (ctrlpriv->caam_emi_slow)
23514 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23515         return 0;
23516  }
23517  
23518 @@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
23519          */
23520         val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
23521               >> RTSDCTL_ENT_DLY_SHIFT;
23522 -       if (ent_delay <= val) {
23523 -               /* put RNG4 into run mode */
23524 -               clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
23525 -               return;
23526 -       }
23527 +       if (ent_delay <= val)
23528 +               goto start_rng;
23529  
23530         val = rd_reg32(&r4tst->rtsdctl);
23531         val = (val & ~RTSDCTL_ENT_DLY_MASK) |
23532 @@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
23533         wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
23534         /* read the control register */
23535         val = rd_reg32(&r4tst->rtmctl);
23536 +start_rng:
23537         /*
23538          * select raw sampling in both entropy shifter
23539 -        * and statistical checker
23540 +        * and statistical checker; put RNG4 into run mode
23541          */
23542 -       clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
23543 -       /* put RNG4 into run mode */
23544 -       clrsetbits_32(&val, RTMCTL_PRGM, 0);
23545 -       /* write back the control register */
23546 -       wr_reg32(&r4tst->rtmctl, val);
23547 +       clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
23548  }
23549  
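kick_trng() now folds raw-sampling selection and the PRGM-bit clear into a single clrsetbits_32() on rtmctl, instead of the removed read/modify/modify/write sequence. A simplified model of the helper's read-modify-write semantics (the real definition in regs.h also handles the big-endian register layout):

static inline void clrsetbits_32_model(void __iomem *reg, u32 clear, u32 set)
{
	u32 v = ioread32(reg);			/* read current value */

	iowrite32((v & ~clear) | set, reg);	/* clear, then set, one write */
}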
23550  /**
23551 @@ -415,28 +414,26 @@ int caam_get_era(void)
23552  }
23553  EXPORT_SYMBOL(caam_get_era);
23554  
23555 -#ifdef CONFIG_DEBUG_FS
23556 -static int caam_debugfs_u64_get(void *data, u64 *val)
23557 -{
23558 -       *val = caam64_to_cpu(*(u64 *)data);
23559 -       return 0;
23560 -}
23561 -
23562 -static int caam_debugfs_u32_get(void *data, u64 *val)
23563 -{
23564 -       *val = caam32_to_cpu(*(u32 *)data);
23565 -       return 0;
23566 -}
23567 -
23568 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
23569 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
23570 -#endif
23571 +static const struct of_device_id caam_match[] = {
23572 +       {
23573 +               .compatible = "fsl,sec-v4.0",
23574 +       },
23575 +       {
23576 +               .compatible = "fsl,sec4.0",
23577 +       },
23578 +       {},
23579 +};
23580 +MODULE_DEVICE_TABLE(of, caam_match);
23581  
23582  /* Probe routine for CAAM top (controller) level */
23583  static int caam_probe(struct platform_device *pdev)
23584  {
23585 -       int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23586 +       int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23587         u64 caam_id;
23588 +       static const struct soc_device_attribute imx_soc[] = {
23589 +               {.family = "Freescale i.MX"},
23590 +               {},
23591 +       };
23592         struct device *dev;
23593         struct device_node *nprop, *np;
23594         struct caam_ctrl __iomem *ctrl;
23595 @@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
23596  
23597         dev = &pdev->dev;
23598         dev_set_drvdata(dev, ctrlpriv);
23599 -       ctrlpriv->pdev = pdev;
23600         nprop = pdev->dev.of_node;
23601  
23602 +       caam_imx = (bool)soc_device_match(imx_soc);
23603 +
23604         /* Enable clocking */
23605         clk = caam_drv_identify_clk(&pdev->dev, "ipg");
23606         if (IS_ERR(clk)) {
23607 @@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
23608         }
23609         ctrlpriv->caam_aclk = clk;
23610  
23611 -       clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23612 -       if (IS_ERR(clk)) {
23613 -               ret = PTR_ERR(clk);
23614 -               dev_err(&pdev->dev,
23615 -                       "can't identify CAAM emi_slow clk: %d\n", ret);
23616 -               return ret;
23617 +       if (!of_machine_is_compatible("fsl,imx6ul")) {
23618 +               clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23619 +               if (IS_ERR(clk)) {
23620 +                       ret = PTR_ERR(clk);
23621 +                       dev_err(&pdev->dev,
23622 +                               "can't identify CAAM emi_slow clk: %d\n", ret);
23623 +                       return ret;
23624 +               }
23625 +               ctrlpriv->caam_emi_slow = clk;
23626         }
23627 -       ctrlpriv->caam_emi_slow = clk;
23628  
23629         ret = clk_prepare_enable(ctrlpriv->caam_ipg);
23630         if (ret < 0) {
23631 @@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
23632                 goto disable_caam_mem;
23633         }
23634  
23635 -       ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23636 -       if (ret < 0) {
23637 -               dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23638 -                       ret);
23639 -               goto disable_caam_aclk;
23640 +       if (ctrlpriv->caam_emi_slow) {
23641 +               ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23642 +               if (ret < 0) {
23643 +                       dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23644 +                               ret);
23645 +                       goto disable_caam_aclk;
23646 +               }
23647         }
23648  
23649         /* Get configuration properties from device tree */
23650 @@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
23651         else
23652                 BLOCK_OFFSET = PG_SIZE_64K;
23653  
23654 -       ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
23655 -       ctrlpriv->assure = (struct caam_assurance __force *)
23656 -                          ((uint8_t *)ctrl +
23657 +       ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
23658 +       ctrlpriv->assure = (struct caam_assurance __iomem __force *)
23659 +                          ((__force uint8_t *)ctrl +
23660                             BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
23661                            );
23662 -       ctrlpriv->deco = (struct caam_deco __force *)
23663 -                        ((uint8_t *)ctrl +
23664 +       ctrlpriv->deco = (struct caam_deco __iomem __force *)
23665 +                        ((__force uint8_t *)ctrl +
23666                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
23667                          );
23668  
23669 @@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
23670  
23671         /*
23672          * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
23673 -        * long pointers in master configuration register
23674 +        * long pointers in master configuration register.
23675 +        * In case of DPAA 2.x, Management Complex firmware performs
23676 +        * the configuration.
23677          */
23678 -       clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23679 -                     MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23680 -                     MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23681 -                     (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
23682 +       caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
23683 +       if (!caam_dpaa2)
23684 +               clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23685 +                             MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23686 +                             MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23687 +                             (sizeof(dma_addr_t) == sizeof(u64) ?
23688 +                              MCFGR_LONG_PTR : 0));
23689  
23690         /*
23691  *  Read the Compile Time parameters and SCFGR to determine
23692 @@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
23693                               JRSTART_JR1_START | JRSTART_JR2_START |
23694                               JRSTART_JR3_START);
23695  
23696 -       if (sizeof(dma_addr_t) == sizeof(u64))
23697 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23698 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23699 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
23700 +               if (caam_dpaa2)
23701 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
23702 +               else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23703 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23704                 else
23705 -                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23706 -       else
23707 -               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23708 +                       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23709 +       } else {
23710 +               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23711 +       }
23712 +       if (ret) {
23713 +               dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
23714 +               goto iounmap_ctrl;
23715 +       }
23716  
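The DMA mask selection is both widened and error-checked now: 49-bit addressing on DPAA 2.x, 40-bit on SEC v5.0, 36-bit on older SECs, 32-bit on 32-bit kernels. A condensed sketch of the same policy as a hypothetical helper (assumes <linux/dma-mapping.h>):

static int caam_pick_dma_mask(struct device *dev, bool dpaa2, bool sec_v5)
{
	u64 mask;

	if (sizeof(dma_addr_t) != sizeof(u64))
		mask = DMA_BIT_MASK(32);
	else if (dpaa2)
		mask = DMA_BIT_MASK(49);
	else if (sec_v5)
		mask = DMA_BIT_MASK(40);
	else
		mask = DMA_BIT_MASK(36);

	return dma_set_mask_and_coherent(dev, mask);
}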
23717 -       /*
23718 -        * Detect and enable JobRs
23719 -        * First, find out how many ring spec'ed, allocate references
23720 -        * for all, then go probe each one.
23721 -        */
23722 -       rspec = 0;
23723 -       for_each_available_child_of_node(nprop, np)
23724 -               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23725 -                   of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
23726 -                       rspec++;
23727 +       ctrlpriv->era = caam_get_era();
23728  
23729 -       ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
23730 -                                       sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
23731 -       if (ctrlpriv->jrpdev == NULL) {
23732 -               ret = -ENOMEM;
23733 +       ret = of_platform_populate(nprop, caam_match, NULL, dev);
23734 +       if (ret) {
23735 +               dev_err(dev, "JR platform devices creation error\n");
23736                 goto iounmap_ctrl;
23737         }
23738  
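Job ring children are now created in one shot against caam_match, and caam_remove() undoes this with of_platform_depopulate() (see the remove hunk earlier); the per-ring jrpdev[] bookkeeping goes away. The pairing, reduced to its essentials:

/* probe: create platform devices for all matching child nodes */
ret = of_platform_populate(nprop, caam_match, NULL, dev);

/* remove: tear down everything populated under this device */
of_platform_depopulate(dev);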
23739 +#ifdef CONFIG_DEBUG_FS
23740 +       /*
23741 +        * FIXME: needs better naming distinction, as some amalgamation of
23742 +        * "caam" and nprop->full_name. The OF name isn't distinctive,
23743 +        * but does separate instances
23744 +        */
23745 +       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23746 +
23747 +       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23748 +       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23749 +#endif
23750         ring = 0;
23751 -       ctrlpriv->total_jobrs = 0;
23752         for_each_available_child_of_node(nprop, np)
23753                 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23754                     of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
23755 -                       ctrlpriv->jrpdev[ring] =
23756 -                               of_platform_device_create(np, NULL, dev);
23757 -                       if (!ctrlpriv->jrpdev[ring]) {
23758 -                               pr_warn("JR%d Platform device creation error\n",
23759 -                                       ring);
23760 -                               continue;
23761 -                       }
23762 -                       ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
23763 -                                            ((uint8_t *)ctrl +
23764 +                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
23765 +                                            ((__force uint8_t *)ctrl +
23766                                              (ring + JR_BLOCK_NUMBER) *
23767                                               BLOCK_OFFSET
23768                                              );
23769                         ctrlpriv->total_jobrs++;
23770                         ring++;
23771 -       }
23772 +               }
23773  
23774 -       /* Check to see if QI present. If so, enable */
23775 -       ctrlpriv->qi_present =
23776 -                       !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
23777 -                          CTPR_MS_QI_MASK);
23778 -       if (ctrlpriv->qi_present) {
23779 -               ctrlpriv->qi = (struct caam_queue_if __force *)
23780 -                              ((uint8_t *)ctrl +
23781 +       /* Check to see if (DPAA 1.x) QI present. If so, enable */
23782 +       ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
23783 +       if (ctrlpriv->qi_present && !caam_dpaa2) {
23784 +               ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
23785 +                              ((__force uint8_t *)ctrl +
23786                                  BLOCK_OFFSET * QI_BLOCK_NUMBER
23787                                );
23788                 /* This is all that's required to physically enable QI */
23789                 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
23790 +
23791 +               /* If QMAN driver is present, init CAAM-QI backend */
23792 +#ifdef CONFIG_CAAM_QI
23793 +               ret = caam_qi_init(pdev);
23794 +               if (ret)
23795 +                       dev_err(dev, "caam qi i/f init failed: %d\n", ret);
23796 +#endif
23797         }
23798  
23799         /* If no QI and no rings specified, quit and go home */
23800 @@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
23801         /*
23802          * If SEC has RNG version >= 4 and RNG state handle has not been
23803          * already instantiated, do RNG instantiation
23804 +        * In case of DPAA 2.x, RNG is managed by MC firmware.
23805          */
23806 -       if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23807 +       if (!caam_dpaa2 &&
23808 +           (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23809                 ctrlpriv->rng4_sh_init =
23810                         rd_reg32(&ctrl->r4tst[0].rdsta);
23811                 /*
23812 @@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
23813  
23814         /* Report "alive" for developer to see */
23815         dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
23816 -                caam_get_era());
23817 -       dev_info(dev, "job rings = %d, qi = %d\n",
23818 -                ctrlpriv->total_jobrs, ctrlpriv->qi_present);
23819 +                ctrlpriv->era);
23820 +       dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
23821 +                ctrlpriv->total_jobrs, ctrlpriv->qi_present,
23822 +                caam_dpaa2 ? "yes" : "no");
23823  
23824  #ifdef CONFIG_DEBUG_FS
23825 -       /*
23826 -        * FIXME: needs better naming distinction, as some amalgamation of
23827 -        * "caam" and nprop->full_name. The OF name isn't distinctive,
23828 -        * but does separate instances
23829 -        */
23830 -       perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23831 -
23832 -       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23833 -       ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23834 -
23835 -       /* Controller-level - performance monitor counters */
23836 -
23837 -       ctrlpriv->ctl_rq_dequeued =
23838 -               debugfs_create_file("rq_dequeued",
23839 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23840 -                                   ctrlpriv->ctl, &perfmon->req_dequeued,
23841 -                                   &caam_fops_u64_ro);
23842 -       ctrlpriv->ctl_ob_enc_req =
23843 -               debugfs_create_file("ob_rq_encrypted",
23844 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23845 -                                   ctrlpriv->ctl, &perfmon->ob_enc_req,
23846 -                                   &caam_fops_u64_ro);
23847 -       ctrlpriv->ctl_ib_dec_req =
23848 -               debugfs_create_file("ib_rq_decrypted",
23849 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23850 -                                   ctrlpriv->ctl, &perfmon->ib_dec_req,
23851 -                                   &caam_fops_u64_ro);
23852 -       ctrlpriv->ctl_ob_enc_bytes =
23853 -               debugfs_create_file("ob_bytes_encrypted",
23854 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23855 -                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23856 -                                   &caam_fops_u64_ro);
23857 -       ctrlpriv->ctl_ob_prot_bytes =
23858 -               debugfs_create_file("ob_bytes_protected",
23859 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23860 -                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23861 -                                   &caam_fops_u64_ro);
23862 -       ctrlpriv->ctl_ib_dec_bytes =
23863 -               debugfs_create_file("ib_bytes_decrypted",
23864 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23865 -                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23866 -                                   &caam_fops_u64_ro);
23867 -       ctrlpriv->ctl_ib_valid_bytes =
23868 -               debugfs_create_file("ib_bytes_validated",
23869 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23870 -                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23871 -                                   &caam_fops_u64_ro);
23872 +       debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
23873 +                           ctrlpriv->ctl, &perfmon->req_dequeued,
23874 +                           &caam_fops_u64_ro);
23875 +       debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23876 +                           ctrlpriv->ctl, &perfmon->ob_enc_req,
23877 +                           &caam_fops_u64_ro);
23878 +       debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23879 +                           ctrlpriv->ctl, &perfmon->ib_dec_req,
23880 +                           &caam_fops_u64_ro);
23881 +       debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23882 +                           ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23883 +                           &caam_fops_u64_ro);
23884 +       debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
23885 +                           ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23886 +                           &caam_fops_u64_ro);
23887 +       debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23888 +                           ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23889 +                           &caam_fops_u64_ro);
23890 +       debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
23891 +                           ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23892 +                           &caam_fops_u64_ro);
23893  
23894         /* Controller level - global status values */
23895 -       ctrlpriv->ctl_faultaddr =
23896 -               debugfs_create_file("fault_addr",
23897 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23898 -                                   ctrlpriv->ctl, &perfmon->faultaddr,
23899 -                                   &caam_fops_u32_ro);
23900 -       ctrlpriv->ctl_faultdetail =
23901 -               debugfs_create_file("fault_detail",
23902 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23903 -                                   ctrlpriv->ctl, &perfmon->faultdetail,
23904 -                                   &caam_fops_u32_ro);
23905 -       ctrlpriv->ctl_faultstatus =
23906 -               debugfs_create_file("fault_status",
23907 -                                   S_IRUSR | S_IRGRP | S_IROTH,
23908 -                                   ctrlpriv->ctl, &perfmon->status,
23909 -                                   &caam_fops_u32_ro);
23910 +       debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
23911 +                           ctrlpriv->ctl, &perfmon->faultaddr,
23912 +                           &caam_fops_u32_ro);
23913 +       debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
23914 +                           ctrlpriv->ctl, &perfmon->faultdetail,
23915 +                           &caam_fops_u32_ro);
23916 +       debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
23917 +                           ctrlpriv->ctl, &perfmon->status,
23918 +                           &caam_fops_u32_ro);
23919  
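Note that the debugfs files above are created without keeping the returned dentries: every entry lives under ctrlpriv->dfs_root, so the error and remove paths can drop the whole tree at once, as the tail of this hunk does:

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif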
23920         /* Internal covering keys (useful in non-secure mode only) */
23921 -       ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
23922 +       ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
23923         ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23924         ctrlpriv->ctl_kek = debugfs_create_blob("kek",
23925                                                 S_IRUSR |
23926 @@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
23927                                                 ctrlpriv->ctl,
23928                                                 &ctrlpriv->ctl_kek_wrap);
23929  
23930 -       ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
23931 +       ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
23932         ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23933         ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
23934                                                  S_IRUSR |
23935 @@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
23936                                                  ctrlpriv->ctl,
23937                                                  &ctrlpriv->ctl_tkek_wrap);
23938  
23939 -       ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
23940 +       ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
23941         ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23942         ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
23943                                                  S_IRUSR |
23944 @@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
23945         return 0;
23946  
23947  caam_remove:
23948 +#ifdef CONFIG_DEBUG_FS
23949 +       debugfs_remove_recursive(ctrlpriv->dfs_root);
23950 +#endif
23951         caam_remove(pdev);
23952         return ret;
23953  
23954  iounmap_ctrl:
23955         iounmap(ctrl);
23956  disable_caam_emi_slow:
23957 -       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23958 +       if (ctrlpriv->caam_emi_slow)
23959 +               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23960  disable_caam_aclk:
23961         clk_disable_unprepare(ctrlpriv->caam_aclk);
23962  disable_caam_mem:
23963 @@ -848,17 +835,6 @@ disable_caam_ipg:
23964         return ret;
23965  }
23966  
23967 -static struct of_device_id caam_match[] = {
23968 -       {
23969 -               .compatible = "fsl,sec-v4.0",
23970 -       },
23971 -       {
23972 -               .compatible = "fsl,sec4.0",
23973 -       },
23974 -       {},
23975 -};
23976 -MODULE_DEVICE_TABLE(of, caam_match);
23977 -
23978  static struct platform_driver caam_driver = {
23979         .driver = {
23980                 .name = "caam",
23981 --- a/drivers/crypto/caam/ctrl.h
23982 +++ b/drivers/crypto/caam/ctrl.h
23983 @@ -10,4 +10,6 @@
23984  /* Prototypes for backend-level services exposed to APIs */
23985  int caam_get_era(void);
23986  
23987 +extern bool caam_dpaa2;
23988 +
23989  #endif /* CTRL_H */
23990 --- a/drivers/crypto/caam/desc.h
23991 +++ b/drivers/crypto/caam/desc.h
23992 @@ -22,12 +22,6 @@
23993  #define SEC4_SG_LEN_MASK       0x3fffffff      /* Excludes EXT and FINAL */
23994  #define SEC4_SG_OFFSET_MASK    0x00001fff
23995  
23996 -struct sec4_sg_entry {
23997 -       u64 ptr;
23998 -       u32 len;
23999 -       u32 bpid_offset;
24000 -};
24001 -
24002  /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
24003  #define MAX_CAAM_DESCSIZE      64
24004  
24005 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
24006  #define CMD_SEQ_LOAD           (0x03 << CMD_SHIFT)
24007  #define CMD_FIFO_LOAD          (0x04 << CMD_SHIFT)
24008  #define CMD_SEQ_FIFO_LOAD      (0x05 << CMD_SHIFT)
24009 +#define CMD_MOVEB              (0x07 << CMD_SHIFT)
24010  #define CMD_STORE              (0x0a << CMD_SHIFT)
24011  #define CMD_SEQ_STORE          (0x0b << CMD_SHIFT)
24012  #define CMD_FIFO_STORE         (0x0c << CMD_SHIFT)
24013 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
24014  #define HDR_ZRO                        0x00008000
24015  
24016  /* Start Index or SharedDesc Length */
24017 -#define HDR_START_IDX_MASK     0x3f
24018  #define HDR_START_IDX_SHIFT    16
24019 +#define HDR_START_IDX_MASK     (0x3f << HDR_START_IDX_SHIFT)
24020  
24021  /* If shared descriptor header, 6-bit length */
24022  #define HDR_DESCLEN_SHR_MASK   0x3f
24023 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
24024  #define HDR_PROP_DNR           0x00000800
24025  
24026  /* JobDesc/SharedDesc share property */
24027 -#define HDR_SD_SHARE_MASK      0x03
24028  #define HDR_SD_SHARE_SHIFT     8
24029 -#define HDR_JD_SHARE_MASK      0x07
24030 +#define HDR_SD_SHARE_MASK      (0x03 << HDR_SD_SHARE_SHIFT)
24031  #define HDR_JD_SHARE_SHIFT     8
24032 +#define HDR_JD_SHARE_MASK      (0x07 << HDR_JD_SHARE_SHIFT)
24033  
24034  #define HDR_SHARE_NEVER                (0x00 << HDR_SD_SHARE_SHIFT)
24035  #define HDR_SHARE_WAIT         (0x01 << HDR_SD_SHARE_SHIFT)
24036 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
24037  #define LDST_SRCDST_WORD_DECO_MATH2    (0x0a << LDST_SRCDST_SHIFT)
24038  #define LDST_SRCDST_WORD_DECO_AAD_SZ   (0x0b << LDST_SRCDST_SHIFT)
24039  #define LDST_SRCDST_WORD_DECO_MATH3    (0x0b << LDST_SRCDST_SHIFT)
24040 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
24041 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ  (0x0c << LDST_SRCDST_SHIFT)
24042  #define LDST_SRCDST_WORD_ALTDS_CLASS1  (0x0f << LDST_SRCDST_SHIFT)
24043  #define LDST_SRCDST_WORD_PKHA_A_SZ     (0x10 << LDST_SRCDST_SHIFT)
24044  #define LDST_SRCDST_WORD_PKHA_B_SZ     (0x11 << LDST_SRCDST_SHIFT)
24045 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
24046  #define FIFOLD_TYPE_PK_N       (0x08 << FIFOLD_TYPE_SHIFT)
24047  #define FIFOLD_TYPE_PK_A       (0x0c << FIFOLD_TYPE_SHIFT)
24048  #define FIFOLD_TYPE_PK_B       (0x0d << FIFOLD_TYPE_SHIFT)
24049 +#define FIFOLD_TYPE_IFIFO      (0x0f << FIFOLD_TYPE_SHIFT)
24050  
24051  /* Other types. Need to OR in last/flush bits as desired */
24052  #define FIFOLD_TYPE_MSG_MASK   (0x38 << FIFOLD_TYPE_SHIFT)
24053 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
24054  #define FIFOST_TYPE_PKHA_N      (0x08 << FIFOST_TYPE_SHIFT)
24055  #define FIFOST_TYPE_PKHA_A      (0x0c << FIFOST_TYPE_SHIFT)
24056  #define FIFOST_TYPE_PKHA_B      (0x0d << FIFOST_TYPE_SHIFT)
24057 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
24058 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
24059  #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
24060  #define FIFOST_TYPE_PKHA_E_JKEK         (0x22 << FIFOST_TYPE_SHIFT)
24061  #define FIFOST_TYPE_PKHA_E_TKEK         (0x23 << FIFOST_TYPE_SHIFT)
24062 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
24063  #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
24064  #define FIFOST_TYPE_RNGSTORE    (0x34 << FIFOST_TYPE_SHIFT)
24065  #define FIFOST_TYPE_RNGFIFO     (0x35 << FIFOST_TYPE_SHIFT)
24066 +#define FIFOST_TYPE_METADATA    (0x3e << FIFOST_TYPE_SHIFT)
24067  #define FIFOST_TYPE_SKIP        (0x3f << FIFOST_TYPE_SHIFT)
24068  
24069  /*
24070 @@ -449,6 +446,18 @@ struct sec4_sg_entry {
24071  #define OP_PCLID_DSAVERIFY     (0x16 << OP_PCLID_SHIFT)
24072  #define OP_PCLID_RSAENC_PUBKEY  (0x18 << OP_PCLID_SHIFT)
24073  #define OP_PCLID_RSADEC_PRVKEY  (0x19 << OP_PCLID_SHIFT)
24074 +#define OP_PCLID_DKP_MD5       (0x20 << OP_PCLID_SHIFT)
24075 +#define OP_PCLID_DKP_SHA1      (0x21 << OP_PCLID_SHIFT)
24076 +#define OP_PCLID_DKP_SHA224    (0x22 << OP_PCLID_SHIFT)
24077 +#define OP_PCLID_DKP_SHA256    (0x23 << OP_PCLID_SHIFT)
24078 +#define OP_PCLID_DKP_SHA384    (0x24 << OP_PCLID_SHIFT)
24079 +#define OP_PCLID_DKP_SHA512    (0x25 << OP_PCLID_SHIFT)
24080 +#define OP_PCLID_DKP_RIF_MD5   (0x60 << OP_PCLID_SHIFT)
24081 +#define OP_PCLID_DKP_RIF_SHA1  (0x61 << OP_PCLID_SHIFT)
24082 +#define OP_PCLID_DKP_RIF_SHA224        (0x62 << OP_PCLID_SHIFT)
24083 +#define OP_PCLID_DKP_RIF_SHA256        (0x63 << OP_PCLID_SHIFT)
24084 +#define OP_PCLID_DKP_RIF_SHA384        (0x64 << OP_PCLID_SHIFT)
24085 +#define OP_PCLID_DKP_RIF_SHA512        (0x65 << OP_PCLID_SHIFT)
24086  
24087  /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
24088  #define OP_PCLID_IPSEC         (0x01 << OP_PCLID_SHIFT)
24089 @@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
24090  /* MacSec protinfos */
24091  #define OP_PCL_MACSEC                           0x0001
24092  
24093 +/* Derived Key Protocol (DKP) Protinfo */
24094 +#define OP_PCL_DKP_SRC_SHIFT   14
24095 +#define OP_PCL_DKP_SRC_MASK    (3 << OP_PCL_DKP_SRC_SHIFT)
24096 +#define OP_PCL_DKP_SRC_IMM     (0 << OP_PCL_DKP_SRC_SHIFT)
24097 +#define OP_PCL_DKP_SRC_SEQ     (1 << OP_PCL_DKP_SRC_SHIFT)
24098 +#define OP_PCL_DKP_SRC_PTR     (2 << OP_PCL_DKP_SRC_SHIFT)
24099 +#define OP_PCL_DKP_SRC_SGF     (3 << OP_PCL_DKP_SRC_SHIFT)
24100 +#define OP_PCL_DKP_DST_SHIFT   12
24101 +#define OP_PCL_DKP_DST_MASK    (3 << OP_PCL_DKP_DST_SHIFT)
24102 +#define OP_PCL_DKP_DST_IMM     (0 << OP_PCL_DKP_DST_SHIFT)
24103 +#define OP_PCL_DKP_DST_SEQ     (1 << OP_PCL_DKP_DST_SHIFT)
24104 +#define OP_PCL_DKP_DST_PTR     (2 << OP_PCL_DKP_DST_SHIFT)
24105 +#define OP_PCL_DKP_DST_SGF     (3 << OP_PCL_DKP_DST_SHIFT)
24106 +#define OP_PCL_DKP_KEY_SHIFT   0
24107 +#define OP_PCL_DKP_KEY_MASK    (0xfff << OP_PCL_DKP_KEY_SHIFT)
24108 +
24109  /* PKI unidirectional protocol protinfo bits */
24110  #define OP_PCL_PKPROT_TEST                      0x0008
24111  #define OP_PCL_PKPROT_DECRYPT                   0x0004
24112 @@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
24113  /* For non-protocol/alg-only op commands */
24114  #define OP_ALG_TYPE_SHIFT      24
24115  #define OP_ALG_TYPE_MASK       (0x7 << OP_ALG_TYPE_SHIFT)
24116 -#define OP_ALG_TYPE_CLASS1     2
24117 -#define OP_ALG_TYPE_CLASS2     4
24118 +#define OP_ALG_TYPE_CLASS1     (2 << OP_ALG_TYPE_SHIFT)
24119 +#define OP_ALG_TYPE_CLASS2     (4 << OP_ALG_TYPE_SHIFT)
24120  
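With OP_ALG_TYPE_CLASS1/2 now pre-shifted (like the corrected HDR_* masks above), a command word can be compared against the mask directly, e.g.:

/* no manual shifting needed once the values carry their shift */
if ((op & OP_ALG_TYPE_MASK) == OP_ALG_TYPE_CLASS2)
	handle_class2_op();	/* hypothetical handler */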
24121  #define OP_ALG_ALGSEL_SHIFT    16
24122  #define OP_ALG_ALGSEL_MASK     (0xff << OP_ALG_ALGSEL_SHIFT)
24123 @@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
24124  #define OP_ALG_PKMODE_MOD_PRIMALITY    0x00f
24125  
24126  /* PKHA mode copy-memory functions */
24127 -#define OP_ALG_PKMODE_SRC_REG_SHIFT    13
24128 +#define OP_ALG_PKMODE_SRC_REG_SHIFT    17
24129  #define OP_ALG_PKMODE_SRC_REG_MASK     (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
24130  #define OP_ALG_PKMODE_DST_REG_SHIFT    10
24131  #define OP_ALG_PKMODE_DST_REG_MASK     (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
24132 @@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
24133  #define MATH_SRC1_REG2         (0x02 << MATH_SRC1_SHIFT)
24134  #define MATH_SRC1_REG3         (0x03 << MATH_SRC1_SHIFT)
24135  #define MATH_SRC1_IMM          (0x04 << MATH_SRC1_SHIFT)
24136 -#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC0_SHIFT)
24137 +#define MATH_SRC1_DPOVRD       (0x07 << MATH_SRC1_SHIFT)
24138  #define MATH_SRC1_INFIFO       (0x0a << MATH_SRC1_SHIFT)
24139  #define MATH_SRC1_OUTFIFO      (0x0b << MATH_SRC1_SHIFT)
24140  #define MATH_SRC1_ONE          (0x0c << MATH_SRC1_SHIFT)
24141 +#define MATH_SRC1_ZERO         (0x0f << MATH_SRC1_SHIFT)
24142  
24143  /* Destination selectors */
24144  #define MATH_DEST_SHIFT                8
24145 @@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
24146  #define MATH_DEST_REG1         (0x01 << MATH_DEST_SHIFT)
24147  #define MATH_DEST_REG2         (0x02 << MATH_DEST_SHIFT)
24148  #define MATH_DEST_REG3         (0x03 << MATH_DEST_SHIFT)
24149 +#define MATH_DEST_DPOVRD       (0x07 << MATH_DEST_SHIFT)
24150  #define MATH_DEST_SEQINLEN     (0x08 << MATH_DEST_SHIFT)
24151  #define MATH_DEST_SEQOUTLEN    (0x09 << MATH_DEST_SHIFT)
24152  #define MATH_DEST_VARSEQINLEN  (0x0a << MATH_DEST_SHIFT)
24153 @@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
24154  /* Frame Descriptor Command for Replacement Job Descriptor */
24155  #define FD_CMD_REPLACE_JOB_DESC                                0x20000000
24156  
24157 +/* CHA Control Register bits */
24158 +#define CCTRL_RESET_CHA_ALL          0x1
24159 +#define CCTRL_RESET_CHA_AESA         0x2
24160 +#define CCTRL_RESET_CHA_DESA         0x4
24161 +#define CCTRL_RESET_CHA_AFHA         0x8
24162 +#define CCTRL_RESET_CHA_KFHA         0x10
24163 +#define CCTRL_RESET_CHA_SF8A         0x20
24164 +#define CCTRL_RESET_CHA_PKHA         0x40
24165 +#define CCTRL_RESET_CHA_MDHA         0x80
24166 +#define CCTRL_RESET_CHA_CRCA         0x100
24167 +#define CCTRL_RESET_CHA_RNG          0x200
24168 +#define CCTRL_RESET_CHA_SF9A         0x400
24169 +#define CCTRL_RESET_CHA_ZUCE         0x800
24170 +#define CCTRL_RESET_CHA_ZUCA         0x1000
24171 +#define CCTRL_UNLOAD_PK_A0           0x10000
24172 +#define CCTRL_UNLOAD_PK_A1           0x20000
24173 +#define CCTRL_UNLOAD_PK_A2           0x40000
24174 +#define CCTRL_UNLOAD_PK_A3           0x80000
24175 +#define CCTRL_UNLOAD_PK_B0           0x100000
24176 +#define CCTRL_UNLOAD_PK_B1           0x200000
24177 +#define CCTRL_UNLOAD_PK_B2           0x400000
24178 +#define CCTRL_UNLOAD_PK_B3           0x800000
24179 +#define CCTRL_UNLOAD_PK_N            0x1000000
24180 +#define CCTRL_UNLOAD_PK_A            0x4000000
24181 +#define CCTRL_UNLOAD_PK_B            0x8000000
24182 +#define CCTRL_UNLOAD_SBOX            0x10000000
24183 +
24184  #endif /* DESC_H */
24185 --- a/drivers/crypto/caam/desc_constr.h
24186 +++ b/drivers/crypto/caam/desc_constr.h
24187 @@ -4,6 +4,9 @@
24188   * Copyright 2008-2012 Freescale Semiconductor, Inc.
24189   */
24190  
24191 +#ifndef DESC_CONSTR_H
24192 +#define DESC_CONSTR_H
24193 +
24194  #include "desc.h"
24195  #include "regs.h"
24196  
24197 @@ -33,38 +36,39 @@
24198  
24199  extern bool caam_little_end;
24200  
24201 -static inline int desc_len(u32 *desc)
24202 +static inline int desc_len(u32 * const desc)
24203  {
24204         return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
24205  }
24206  
24207 -static inline int desc_bytes(void *desc)
24208 +static inline int desc_bytes(void * const desc)
24209  {
24210         return desc_len(desc) * CAAM_CMD_SZ;
24211  }
24212  
24213 -static inline u32 *desc_end(u32 *desc)
24214 +static inline u32 *desc_end(u32 * const desc)
24215  {
24216         return desc + desc_len(desc);
24217  }
24218  
24219 -static inline void *sh_desc_pdb(u32 *desc)
24220 +static inline void *sh_desc_pdb(u32 * const desc)
24221  {
24222         return desc + 1;
24223  }
24224  
24225 -static inline void init_desc(u32 *desc, u32 options)
24226 +static inline void init_desc(u32 * const desc, u32 options)
24227  {
24228         *desc = cpu_to_caam32((options | HDR_ONE) + 1);
24229  }
24230  
24231 -static inline void init_sh_desc(u32 *desc, u32 options)
24232 +static inline void init_sh_desc(u32 * const desc, u32 options)
24233  {
24234         PRINT_POS;
24235         init_desc(desc, CMD_SHARED_DESC_HDR | options);
24236  }
24237  
24238 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24239 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
24240 +                                   size_t pdb_bytes)
24241  {
24242         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24243  
24244 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
24245                      options);
24246  }
24247  
24248 -static inline void init_job_desc(u32 *desc, u32 options)
24249 +static inline void init_job_desc(u32 * const desc, u32 options)
24250  {
24251         init_desc(desc, CMD_DESC_HDR | options);
24252  }
24253  
24254 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24255 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
24256 +                                    size_t pdb_bytes)
24257  {
24258         u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24259  
24260         init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
24261  }
24262  
24263 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
24264 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
24265  {
24266         dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
24267  
24268 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
24269                                 CAAM_PTR_SZ / CAAM_CMD_SZ);
24270  }
24271  
24272 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
24273 -                                       u32 options)
24274 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
24275 +                                       int len, u32 options)
24276  {
24277         PRINT_POS;
24278         init_job_desc(desc, HDR_SHARED | options |
24279 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
24280         append_ptr(desc, ptr);
24281  }
24282  
24283 -static inline void append_data(u32 *desc, void *data, int len)
24284 +static inline void append_data(u32 * const desc, const void *data, int len)
24285  {
24286         u32 *offset = desc_end(desc);
24287  
24288 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
24289                                 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
24290  }
24291  
24292 -static inline void append_cmd(u32 *desc, u32 command)
24293 +static inline void append_cmd(u32 * const desc, u32 command)
24294  {
24295         u32 *cmd = desc_end(desc);
24296  
24297 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
24298  
24299  #define append_u32 append_cmd
24300  
24301 -static inline void append_u64(u32 *desc, u64 data)
24302 +static inline void append_u64(u32 * const desc, u64 data)
24303  {
24304         u32 *offset = desc_end(desc);
24305  
24306 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
24307  }
24308  
24309  /* Write command without affecting header, and return pointer to next word */
24310 -static inline u32 *write_cmd(u32 *desc, u32 command)
24311 +static inline u32 *write_cmd(u32 * const desc, u32 command)
24312  {
24313         *desc = cpu_to_caam32(command);
24314  
24315         return desc + 1;
24316  }
24317  
24318 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
24319 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
24320                                   u32 command)
24321  {
24322         append_cmd(desc, command | len);
24323 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
24324  }
24325  
24326  /* Write length after pointer, rather than inside command */
24327 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
24328 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
24329                                          unsigned int len, u32 command)
24330  {
24331         append_cmd(desc, command);
24332 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
24333         append_cmd(desc, len);
24334  }
24335  
24336 -static inline void append_cmd_data(u32 *desc, void *data, int len,
24337 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
24338                                    u32 command)
24339  {
24340         append_cmd(desc, command | IMMEDIATE | len);
24341 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
24342  }
24343  
24344  #define APPEND_CMD_RET(cmd, op) \
24345 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
24346 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
24347  { \
24348         u32 *cmd = desc_end(desc); \
24349         PRINT_POS; \
24350 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
24351  }
24352  APPEND_CMD_RET(jump, JUMP)
24353  APPEND_CMD_RET(move, MOVE)
24354 +APPEND_CMD_RET(moveb, MOVEB)
24355  
24356 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
24357 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
24358  {
24359         *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
24360                                   (desc_len(desc) - (jump_cmd - desc)));
24361  }
24362  
24363 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
24364 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
24365  {
24366         u32 val = caam32_to_cpu(*move_cmd);
24367  
24368 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
24369  }
24370  
24371  #define APPEND_CMD(cmd, op) \
24372 -static inline void append_##cmd(u32 *desc, u32 options) \
24373 +static inline void append_##cmd(u32 * const desc, u32 options) \
24374  { \
24375         PRINT_POS; \
24376         append_cmd(desc, CMD_##op | options); \
24377 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
24378  APPEND_CMD(operation, OPERATION)
24379  
24380  #define APPEND_CMD_LEN(cmd, op) \
24381 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
24382 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
24383 +                               u32 options) \
24384  { \
24385         PRINT_POS; \
24386         append_cmd(desc, CMD_##op | len | options); \
24387 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
24388  APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
24389  
24390  #define APPEND_CMD_PTR(cmd, op) \
24391 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
24392 -                               u32 options) \
24393 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24394 +                               unsigned int len, u32 options) \
24395  { \
24396         PRINT_POS; \
24397         append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
24398 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
24399  APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
24400  APPEND_CMD_PTR(fifo_store, FIFO_STORE)
24401  
24402 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
24403 -                               u32 options)
24404 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
24405 +                               unsigned int len, u32 options)
24406  {
24407         u32 cmd_src;
24408  
24409 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
24410  }
24411  
24412  #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
24413 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
24414 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
24415 +                                                dma_addr_t ptr, \
24416                                                  unsigned int len, \
24417                                                  u32 options) \
24418  { \
24419 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
24420  APPEND_SEQ_PTR_INTLEN(out, OUT)
24421  
24422  #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
24423 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24424 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24425                                          unsigned int len, u32 options) \
24426  { \
24427         PRINT_POS; \
24428 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
24429  APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
24430  
24431  #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
24432 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
24433 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
24434                                          unsigned int len, u32 options) \
24435  { \
24436         PRINT_POS; \
24437 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
24438   * the size of its type
24439   */
24440  #define APPEND_CMD_PTR_LEN(cmd, op, type) \
24441 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
24442 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24443                                 type len, u32 options) \
24444  { \
24445         PRINT_POS; \
24446 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
24447   * from length of immediate data provided, e.g., split keys
24448   */
24449  #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
24450 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24451 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24452                                          unsigned int data_len, \
24453                                          unsigned int len, u32 options) \
24454  { \
24455 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
24456  APPEND_CMD_PTR_TO_IMM2(key, KEY);
24457  
24458  #define APPEND_CMD_RAW_IMM(cmd, op, type) \
24459 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
24460 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
24461                                              u32 options) \
24462  { \
24463         PRINT_POS; \
24464 @@ -426,3 +434,107 @@ do { \
24465         APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
24466  #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
24467         APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
24468 +
24469 +/**
24470 + * struct alginfo - Container for algorithm details
24471 + * @algtype: algorithm selector; for valid values, see documentation of the
24472 + *           functions where it is used.
24473 + * @keylen: length of the provided algorithm key, in bytes
24474 + * @keylen_pad: padded length of the provided algorithm key, in bytes
24475 + * @key: address where algorithm key resides; virtual address if key_inline
24476 + *       is true, dma (bus) address if key_inline is false.
24477 + * @key_inline: true - key can be inlined in the descriptor; false - key is
24478 + *              referenced by the descriptor
24479 + */
24480 +struct alginfo {
24481 +       u32 algtype;
24482 +       unsigned int keylen;
24483 +       unsigned int keylen_pad;
24484 +       union {
24485 +               dma_addr_t key_dma;
24486 +               const void *key_virt;
24487 +       };
24488 +       bool key_inline;
24489 +};
24490 +
24491 +/**
24492 + * desc_inline_query() - Provide indications on which data items can be inlined
24493 + *                       and which shall be referenced in a shared descriptor.
24494 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
24495 + *               excluding the data items to be inlined (or corresponding
24496 + *               pointer if an item is not inlined). Each cnstr_* function that
24497 + *               generates descriptors should have a define mentioning
24498 + *               the corresponding length.
24499 + * @jd_len: Maximum length of the job descriptor(s) that will be used
24500 + *          together with the shared descriptor.
24501 + * @data_len: Array of lengths of the data items trying to be inlined
24502 + * @inl_mask: 32-bit mask with bit x = 1 if data item x can be inlined, 0
24503 + *            otherwise.
24504 + * @count: Number of data items (size of @data_len array); must be <= 32
24505 + *
24506 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
24507 + *         check @inl_mask for details.
24508 + */
24509 +static inline int desc_inline_query(unsigned int sd_base_len,
24510 +                                   unsigned int jd_len, unsigned int *data_len,
24511 +                                   u32 *inl_mask, unsigned int count)
24512 +{
24513 +       int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
24514 +       unsigned int i;
24515 +
24516 +       *inl_mask = 0;
24517 +       for (i = 0; (i < count) && (rem_bytes > 0); i++) {
24518 +               if (rem_bytes - (int)(data_len[i] +
24519 +                       (count - i - 1) * CAAM_PTR_SZ) >= 0) {
24520 +                       rem_bytes -= data_len[i];
24521 +                       *inl_mask |= (1 << i);
24522 +               } else {
24523 +                       rem_bytes -= CAAM_PTR_SZ;
24524 +               }
24525 +       }
24526 +
24527 +       return (rem_bytes >= 0) ? 0 : -1;
24528 +}
24529 +
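A worked use of desc_inline_query(), in the style of the caamalg descriptor-construction callers elsewhere in this patch; DESC_AEAD_BASE and DESC_JOB_IO_LEN stand in for the per-constructor length defines the kernel-doc above mentions:

u32 inl_mask;
unsigned int data_len[2];

data_len[0] = adata->keylen_pad;	/* item 0: split authentication key */
data_len[1] = cdata->keylen;		/* item 1: cipher key */

if (desc_inline_query(DESC_AEAD_BASE, DESC_JOB_IO_LEN,
		      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
	return -EINVAL;

adata->key_inline = !!(inl_mask & BIT(0));
cdata->key_inline = !!(inl_mask & BIT(1));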
24530 +/**
24531 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
24532 + * @desc: pointer to buffer used for descriptor construction
24533 + * @adata: pointer to authentication transform definitions.
24534 + *         keylen should be the length of the initial key, while keylen_pad
24535 + *         the length of the derived (split) key.
24536 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
24537 + *         SHA256, SHA384, SHA512}.
24538 + */
24539 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
24540 +{
24541 +       u32 protid;
24542 +
24543 +       /*
24544 +        * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
24545 +        * to OP_PCLID_DKP_{MD5, SHA*}
24546 +        */
24547 +       protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
24548 +                (0x20 << OP_ALG_ALGSEL_SHIFT);
24549 +
24550 +       if (adata->key_inline) {
24551 +               int words;
24552 +
24553 +               append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24554 +                                OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
24555 +                                adata->keylen);
24556 +               append_data(desc, adata->key_virt, adata->keylen);
24557 +
24558 +               /* Reserve space in descriptor buffer for the derived key */
24559 +               words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
24560 +                        ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
24561 +               if (words)
24562 +                       (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
24563 +       } else {
24564 +               append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24565 +                                OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
24566 +                                adata->keylen);
24567 +               append_ptr(desc, adata->key_dma);
24568 +       }
24569 +}
24570 +
24571 +#endif /* DESC_CONSTR_H */
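Typical use of append_proto_dkp(): fill struct alginfo with the raw key and the padded split-key length, and the DKP OPERATION derives the split key straight into the descriptor. A sketch with literal lengths for HMAC-SHA256 (the split key is two padded SHA-256 digests, hence 64 bytes):

struct alginfo adata = {
	.algtype    = OP_ALG_ALGSEL_SHA256,
	.keylen     = 32,	/* raw HMAC key length, bytes */
	.keylen_pad = 64,	/* derived (split) key length, bytes */
	.key_virt   = key,	/* raw key buffer, inlined below */
	.key_inline = true,
};

append_proto_dkp(desc, &adata);	/* key -> split key, in-descriptor */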
24572 --- /dev/null
24573 +++ b/drivers/crypto/caam/dpseci.c
24574 @@ -0,0 +1,859 @@
24575 +/*
24576 + * Copyright 2013-2016 Freescale Semiconductor Inc.
24577 + * Copyright 2017 NXP
24578 + *
24579 + * Redistribution and use in source and binary forms, with or without
24580 + * modification, are permitted provided that the following conditions are met:
24581 + *     * Redistributions of source code must retain the above copyright
24582 + *      notice, this list of conditions and the following disclaimer.
24583 + *     * Redistributions in binary form must reproduce the above copyright
24584 + *      notice, this list of conditions and the following disclaimer in the
24585 + *      documentation and/or other materials provided with the distribution.
24586 + *     * Neither the names of the above-listed copyright holders nor the
24587 + *      names of any contributors may be used to endorse or promote products
24588 + *      derived from this software without specific prior written permission.
24589 + *
24590 + *
24591 + * ALTERNATIVELY, this software may be distributed under the terms of the
24592 + * GNU General Public License ("GPL") as published by the Free Software
24593 + * Foundation, either version 2 of that License or (at your option) any
24594 + * later version.
24595 + *
24596 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24597 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24598 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24599 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
24600 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24601 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24602 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24603 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24604 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24605 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24606 + * POSSIBILITY OF SUCH DAMAGE.
24607 + */
24608 +
24609 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
24610 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
24611 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
24612 +#include "dpseci.h"
24613 +#include "dpseci_cmd.h"
24614 +
24615 +/**
24616 + * dpseci_open() - Open a control session for the specified object
24617 + * @mc_io:     Pointer to MC portal's I/O object
24618 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24619 + * @dpseci_id: DPSECI unique ID
24620 + * @token:     Returned token; use in subsequent API calls
24621 + *
24622 + * This function can be used to open a control session for an already created
24623 + * object; an object may have been declared in the DPL or created by
24624 + * calling the dpseci_create() function.
24625 + * This function returns a unique authentication token, associated with the
24626 + * specific object ID and the specific MC portal; this token must be used in all
24627 + * subsequent commands for this specific object.
24628 + *
24629 + * Return:     '0' on success, error code otherwise
24630 + */
24631 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
24632 +               u16 *token)
24633 +{
24634 +       struct mc_command cmd = { 0 };
24635 +       struct dpseci_cmd_open *cmd_params;
24636 +       int err;
24637 +
24638 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
24639 +                                         cmd_flags,
24640 +                                         0);
24641 +       cmd_params = (struct dpseci_cmd_open *)cmd.params;
24642 +       cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
24643 +       err = mc_send_command(mc_io, &cmd);
24644 +       if (err)
24645 +               return err;
24646 +
24647 +       *token = mc_cmd_hdr_read_token(&cmd);
24648 +
24649 +       return 0;
24650 +}
24651 +
24652 +/**
24653 + * dpseci_close() - Close the control session of the object
24654 + * @mc_io:     Pointer to MC portal's I/O object
24655 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24656 + * @token:     Token of DPSECI object
24657 + *
24658 + * After this function is called, no further operations are allowed on the
24659 + * object without opening a new control session.
24660 + *
24661 + * Return:     '0' on success, error code otherwise
24662 + */
24663 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24664 +{
24665 +       struct mc_command cmd = { 0 };
24666 +
24667 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
24668 +                                         cmd_flags,
24669 +                                         token);
24670 +       return mc_send_command(mc_io, &cmd);
24671 +}
24672 +
24673 +/**
24674 + * dpseci_create() - Create the DPSECI object
24675 + * @mc_io:     Pointer to MC portal's I/O object
24676 + * @dprc_token:        Parent container token; '0' for default container
24677 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24678 + * @cfg:       Configuration structure
24679 + * @obj_id:    returned object id
24680 + *
24681 + * Create the DPSECI object, allocate required resources and perform required
24682 + * initialization.
24683 + *
24684 + * The object can be created either by declaring it in the DPL file, or by
24685 + * calling this function.
24686 + *
24687 + * The function accepts an authentication token of a parent container that this
24688 + * object should be assigned to. The token can be '0', in which case the
24689 + * object is assigned to the default container.
24690 + * The newly created object can be opened with the returned object id and using
24691 + * the container's associated tokens and MC portals.
24692 + *
24693 + * Return:     '0' on success, error code otherwise
24694 + */
24695 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24696 +                 const struct dpseci_cfg *cfg, u32 *obj_id)
24697 +{
24698 +       struct mc_command cmd = { 0 };
24699 +       struct dpseci_cmd_create *cmd_params;
24700 +       int i, err;
24701 +
24702 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
24703 +                                         cmd_flags,
24704 +                                         dprc_token);
24705 +       cmd_params = (struct dpseci_cmd_create *)cmd.params;
24706 +       for (i = 0; i < DPSECI_PRIO_NUM; i++)
24707 +               cmd_params->priorities[i] = cfg->priorities[i];
24708 +       cmd_params->num_tx_queues = cfg->num_tx_queues;
24709 +       cmd_params->num_rx_queues = cfg->num_rx_queues;
24710 +       cmd_params->options = cpu_to_le32(cfg->options);
24711 +       err = mc_send_command(mc_io, &cmd);
24712 +       if (err)
24713 +               return err;
24714 +
24715 +       *obj_id = mc_cmd_read_object_id(&cmd);
24716 +
24717 +       return 0;
24718 +}
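
(Illustrative sketch, not part of the patch: creating a DPSECI with two queue pairs in the default container, i.e. dprc_token = 0. Priority values follow the 1-8 range documented for struct dpseci_cfg later in this patch.)

	struct dpseci_cfg cfg = {
		.options = 0,
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },	/* only the first num_tx_queues entries matter */
	};
	u32 obj_id;
	int err;

	err = dpseci_create(mc_io, 0, 0, &cfg, &obj_id);
	if (err)
		return err;
	/* 'obj_id' can now be passed to dpseci_open() */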
24719 +
24720 +/**
24721 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
24722 + * @mc_io:     Pointer to MC portal's I/O object
24723 + * @dprc_token: Parent container token; '0' for default container
24724 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24725 + * @object_id: The object id; it must be a valid id within the container that
24726 + *             created this object
24727 + *
24728 + * The function accepts the authentication token of the parent container that
24729 + * created the object (not the one that currently owns the object). The object
24730 + * is searched for within the parent using the provided 'object_id'.
24731 + * All tokens to the object must be closed before calling destroy.
24732 + *
24733 + * Return:     '0' on success, error code otherwise
24734 + */
24735 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24736 +                  u32 object_id)
24737 +{
24738 +       struct mc_command cmd = { 0 };
24739 +       struct dpseci_cmd_destroy *cmd_params;
24740 +
24741 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
24742 +                                         cmd_flags,
24743 +                                         dprc_token);
24744 +       cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
24745 +       cmd_params->object_id = cpu_to_le32(object_id);
24746 +
24747 +       return mc_send_command(mc_io, &cmd);
24748 +}
24749 +
24750 +/**
24751 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
24752 + * @mc_io:     Pointer to MC portal's I/O object
24753 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24754 + * @token:     Token of DPSECI object
24755 + *
24756 + * Return:     '0' on success, error code otherwise
24757 + */
24758 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24759 +{
24760 +       struct mc_command cmd = { 0 };
24761 +
24762 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
24763 +                                         cmd_flags,
24764 +                                         token);
24765 +       return mc_send_command(mc_io, &cmd);
24766 +}
24767 +
24768 +/**
24769 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
24770 + * @mc_io:     Pointer to MC portal's I/O object
24771 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24772 + * @token:     Token of DPSECI object
24773 + *
24774 + * Return:     '0' on success, error code otherwise
24775 + */
24776 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24777 +{
24778 +       struct mc_command cmd = { 0 };
24779 +
24780 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
24781 +                                         cmd_flags,
24782 +                                         token);
24783 +
24784 +       return mc_send_command(mc_io, &cmd);
24785 +}
24786 +
24787 +/**
24788 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
24789 + * @mc_io:     Pointer to MC portal's I/O object
24790 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24791 + * @token:     Token of DPSECI object
24792 + * @en:                Returns '1' if object is enabled; '0' otherwise
24793 + *
24794 + * Return:     '0' on success, error code otherwise
24795 + */
24796 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24797 +                     int *en)
24798 +{
24799 +       struct mc_command cmd = { 0 };
24800 +       struct dpseci_rsp_is_enabled *rsp_params;
24801 +       int err;
24802 +
24803 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
24804 +                                         cmd_flags,
24805 +                                         token);
24806 +       err = mc_send_command(mc_io, &cmd);
24807 +       if (err)
24808 +               return err;
24809 +
24810 +       rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
24811 +       *en = le32_to_cpu(rsp_params->is_enabled);
24812 +
24813 +       return 0;
24814 +}
24815 +
24816 +/**
24817 + * dpseci_reset() - Reset the DPSECI, returning the object to its initial state.
24818 + * @mc_io:     Pointer to MC portal's I/O object
24819 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24820 + * @token:     Token of DPSECI object
24821 + *
24822 + * Return:     '0' on success, error code otherwise
24823 + */
24824 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24825 +{
24826 +       struct mc_command cmd = { 0 };
24827 +
24828 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
24829 +                                         cmd_flags,
24830 +                                         token);
24831 +
24832 +       return mc_send_command(mc_io, &cmd);
24833 +}
24834 +
24835 +/**
24836 + * dpseci_get_irq_enable() - Get overall interrupt state
24837 + * @mc_io:     Pointer to MC portal's I/O object
24838 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24839 + * @token:     Token of DPSECI object
24840 + * @irq_index: The interrupt index to configure
24841 + * @en:                Returned interrupt state - enable = 1, disable = 0
24842 + *
24843 + * Return:     '0' on success, error code otherwise
24844 + */
24845 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24846 +                         u8 irq_index, u8 *en)
24847 +{
24848 +       struct mc_command cmd = { 0 };
24849 +       struct dpseci_cmd_irq_enable *cmd_params;
24850 +       struct dpseci_rsp_get_irq_enable *rsp_params;
24851 +       int err;
24852 +
24853 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
24854 +                                         cmd_flags,
24855 +                                         token);
24856 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24857 +       cmd_params->irq_index = irq_index;
24858 +       err = mc_send_command(mc_io, &cmd);
24859 +       if (err)
24860 +               return err;
24861 +
24862 +       rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
24863 +       *en = rsp_params->enable_state;
24864 +
24865 +       return 0;
24866 +}
24867 +
24868 +/**
24869 + * dpseci_set_irq_enable() - Set overall interrupt state.
24870 + * @mc_io:     Pointer to MC portal's I/O object
24871 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24872 + * @token:     Token of DPSECI object
24873 + * @irq_index: The interrupt index to configure
24874 + * @en:                Interrupt state - enable = 1, disable = 0
24875 + *
24876 + * Allows GPP software to control when interrupts are generated.
24877 + * Each interrupt can have up to 32 causes. This setting controls the
24878 + * overall interrupt state: if the interrupt is disabled, none of its causes
24879 + * can assert the interrupt.
24880 + *
24881 + * Return:     '0' on success, error code otherwise
24882 + */
24883 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24884 +                         u8 irq_index, u8 en)
24885 +{
24886 +       struct mc_command cmd = { 0 };
24887 +       struct dpseci_cmd_irq_enable *cmd_params;
24888 +
24889 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
24890 +                                         cmd_flags,
24891 +                                         token);
24892 +       cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24893 +       cmd_params->irq_index = irq_index;
24894 +       cmd_params->enable_state = en;
24895 +
24896 +       return mc_send_command(mc_io, &cmd);
24897 +}
24898 +
24899 +/**
24900 + * dpseci_get_irq_mask() - Get interrupt mask.
24901 + * @mc_io:     Pointer to MC portal's I/O object
24902 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24903 + * @token:     Token of DPSECI object
24904 + * @irq_index: The interrupt index to configure
24905 + * @mask:      Returned event mask to trigger interrupt
24906 + *
24907 + * Every interrupt can have up to 32 causes and the interrupt model supports
24908 + * masking/unmasking each cause independently.
24909 + *
24910 + * Return:     '0' on success, error code otherwise
24911 + */
24912 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24913 +                       u8 irq_index, u32 *mask)
24914 +{
24915 +       struct mc_command cmd = { 0 };
24916 +       struct dpseci_cmd_irq_mask *cmd_params;
24917 +       int err;
24918 +
24919 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
24920 +                                         cmd_flags,
24921 +                                         token);
24922 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24923 +       cmd_params->irq_index = irq_index;
24924 +       err = mc_send_command(mc_io, &cmd);
24925 +       if (err)
24926 +               return err;
24927 +
24928 +       *mask = le32_to_cpu(cmd_params->mask);
24929 +
24930 +       return 0;
24931 +}
24932 +
24933 +/**
24934 + * dpseci_set_irq_mask() - Set interrupt mask.
24935 + * @mc_io:     Pointer to MC portal's I/O object
24936 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24937 + * @token:     Token of DPSECI object
24938 + * @irq_index: The interrupt index to configure
24939 + * @mask:      event mask to trigger interrupt;
24940 + *             each bit:
24941 + *                     0 = ignore event
24942 + *                     1 = consider event for asserting IRQ
24943 + *
24944 + * Every interrupt can have up to 32 causes and the interrupt model supports
24945 + * masking/unmasking each cause independently.
24946 + *
24947 + * Return:     '0' on success, error code otherwise
24948 + */
24949 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24950 +                       u8 irq_index, u32 mask)
24951 +{
24952 +       struct mc_command cmd = { 0 };
24953 +       struct dpseci_cmd_irq_mask *cmd_params;
24954 +
24955 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
24956 +                                         cmd_flags,
24957 +                                         token);
24958 +       cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24959 +       cmd_params->mask = cpu_to_le32(mask);
24960 +       cmd_params->irq_index = irq_index;
24961 +
24962 +       return mc_send_command(mc_io, &cmd);
24963 +}
24964 +
24965 +/**
24966 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
24967 + * @mc_io:     Pointer to MC portal's I/O object
24968 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24969 + * @token:     Token of DPSECI object
24970 + * @irq_index: The interrupt index to configure
24971 + * @status:    Returned interrupts status - one bit per cause:
24972 + *                     0 = no interrupt pending
24973 + *                     1 = interrupt pending
24974 + *
24975 + * Return:     '0' on success, error code otherwise
24976 + */
24977 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24978 +                         u8 irq_index, u32 *status)
24979 +{
24980 +       struct mc_command cmd = { 0 };
24981 +       struct dpseci_cmd_irq_status *cmd_params;
24982 +       int err;
24983 +
24984 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
24985 +                                         cmd_flags,
24986 +                                         token);
24987 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
24988 +       cmd_params->status = cpu_to_le32(*status);
24989 +       cmd_params->irq_index = irq_index;
24990 +       err = mc_send_command(mc_io, &cmd);
24991 +       if (err)
24992 +               return err;
24993 +
24994 +       *status = le32_to_cpu(cmd_params->status);
24995 +
24996 +       return 0;
24997 +}
24998 +
24999 +/**
25000 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
25001 + * @mc_io:     Pointer to MC portal's I/O object
25002 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25003 + * @token:     Token of DPSECI object
25004 + * @irq_index: The interrupt index to configure
25005 + * @status:    bits to clear (W1C - write 1 to clear) - one bit per cause:
25006 + *                     0 = don't change
25007 + *                     1 = clear status bit
25008 + *
25009 + * Return:     '0' on success, error code otherwise
25010 + */
25011 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25012 +                           u8 irq_index, u32 status)
25013 +{
25014 +       struct mc_command cmd = { 0 };
25015 +       struct dpseci_cmd_irq_status *cmd_params;
25016 +
25017 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
25018 +                                         cmd_flags,
25019 +                                         token);
25020 +       cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25021 +       cmd_params->status = cpu_to_le32(status);
25022 +       cmd_params->irq_index = irq_index;
25023 +
25024 +       return mc_send_command(mc_io, &cmd);
25025 +}
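
(Illustrative sketch, not part of the patch: the IRQ calls above form a two-level model, a per-cause mask plus an overall enable, with W1C acknowledgment. Arming and servicing interrupt index 0 might look as follows.)

	u32 status = 0;
	int err;

	/* arm: consider all 32 causes, then enable the interrupt */
	err = dpseci_set_irq_mask(mc_io, 0, token, 0, GENMASK(31, 0));
	if (!err)
		err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);

	/* service: read the pending causes, then acknowledge them (W1C) */
	err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
	if (!err && status)
		err = dpseci_clear_irq_status(mc_io, 0, token, 0, status);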
25026 +
25027 +/**
25028 + * dpseci_get_attributes() - Retrieve DPSECI attributes
25029 + * @mc_io:     Pointer to MC portal's I/O object
25030 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25031 + * @token:     Token of DPSECI object
25032 + * @attr:      Returned object's attributes
25033 + *
25034 + * Return:     '0' on success, error code otherwise
25035 + */
25036 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25037 +                         struct dpseci_attr *attr)
25038 +{
25039 +       struct mc_command cmd = { 0 };
25040 +       struct dpseci_rsp_get_attributes *rsp_params;
25041 +       int err;
25042 +
25043 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
25044 +                                         cmd_flags,
25045 +                                         token);
25046 +       err = mc_send_command(mc_io, &cmd);
25047 +       if (err)
25048 +               return err;
25049 +
25050 +       rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
25051 +       attr->id = le32_to_cpu(rsp_params->id);
25052 +       attr->num_tx_queues = rsp_params->num_tx_queues;
25053 +       attr->num_rx_queues = rsp_params->num_rx_queues;
25054 +       attr->options = le32_to_cpu(rsp_params->options);
25055 +
25056 +       return 0;
25057 +}
25058 +
25059 +/**
25060 + * dpseci_set_rx_queue() - Set Rx queue configuration
25061 + * @mc_io:     Pointer to MC portal's I/O object
25062 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25063 + * @token:     Token of DPSECI object
25064 + * @queue:     Select the queue relative to the number of priorities configured at
25065 + *             DPSECI creation; use DPSECI_ALL_QUEUES to configure all
25066 + *             Rx queues identically.
25067 + * @cfg:       Rx queue configuration
25068 + *
25069 + * Return:     '0' on success, error code otherwise
25070 + */
25071 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25072 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg)
25073 +{
25074 +       struct mc_command cmd = { 0 };
25075 +       struct dpseci_cmd_queue *cmd_params;
25076 +
25077 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
25078 +                                         cmd_flags,
25079 +                                         token);
25080 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25081 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25082 +       cmd_params->priority = cfg->dest_cfg.priority;
25083 +       cmd_params->queue = queue;
25084 +       cmd_params->dest_type = cfg->dest_cfg.dest_type;
25085 +       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
25086 +       cmd_params->options = cpu_to_le32(cfg->options);
25087 +       cmd_params->order_preservation_en =
25088 +               cpu_to_le32(cfg->order_preservation_en);
25089 +
25090 +       return mc_send_command(mc_io, &cmd);
25091 +}
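
(Illustrative sketch, not part of the patch: pointing every Rx queue at the same DPIO. 'dpio_id' and 'priv' are placeholders; struct dpseci_rx_queue_cfg is defined later in this patch.)

	struct dpseci_rx_queue_cfg rx_cfg = {
		.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
		.user_ctx = (u64)(unsigned long)priv,	/* echoed in each dequeued FD */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};
	int err;

	err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);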
25092 +
25093 +/**
25094 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
25095 + * @mc_io:     Pointer to MC portal's I/O object
25096 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25097 + * @token:     Token of DPSECI object
25098 + * @queue:     Select the queue relative to the number of priorities configured at
25099 + *             DPSECI creation
25100 + * @attr:      Returned Rx queue attributes
25101 + *
25102 + * Return:     '0' on success, error code otherwise
25103 + */
25104 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25105 +                       u8 queue, struct dpseci_rx_queue_attr *attr)
25106 +{
25107 +       struct mc_command cmd = { 0 };
25108 +       struct dpseci_cmd_queue *cmd_params;
25109 +       int err;
25110 +
25111 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
25112 +                                         cmd_flags,
25113 +                                         token);
25114 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25115 +       cmd_params->queue = queue;
25116 +       err = mc_send_command(mc_io, &cmd);
25117 +       if (err)
25118 +               return err;
25119 +
25120 +       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
25121 +       attr->dest_cfg.priority = cmd_params->priority;
25122 +       attr->dest_cfg.dest_type = cmd_params->dest_type;
25123 +       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
25124 +       attr->fqid = le32_to_cpu(cmd_params->fqid);
25125 +       attr->order_preservation_en =
25126 +               le32_to_cpu(cmd_params->order_preservation_en);
25127 +
25128 +       return 0;
25129 +}
25130 +
25131 +/**
25132 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
25133 + * @mc_io:     Pointer to MC portal's I/O object
25134 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25135 + * @token:     Token of DPSECI object
25136 + * @queue:     Select the queue relative to the number of priorities configured at
25137 + *             DPSECI creation
25138 + * @attr:      Returned Tx queue attributes
25139 + *
25140 + * Return:     '0' on success, error code otherwise
25141 + */
25142 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25143 +                       u8 queue, struct dpseci_tx_queue_attr *attr)
25144 +{
25145 +       struct mc_command cmd = { 0 };
25146 +       struct dpseci_cmd_queue *cmd_params;
25147 +       struct dpseci_rsp_get_tx_queue *rsp_params;
25148 +       int err;
25149 +
25150 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
25151 +                                         cmd_flags,
25152 +                                         token);
25153 +       cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25154 +       cmd_params->queue = queue;
25155 +       err = mc_send_command(mc_io, &cmd);
25156 +       if (err)
25157 +               return err;
25158 +
25159 +       rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
25160 +       attr->fqid = le32_to_cpu(rsp_params->fqid);
25161 +       attr->priority = rsp_params->priority;
25162 +
25163 +       return 0;
25164 +}
25165 +
25166 +/**
25167 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
25168 + * @mc_io:     Pointer to MC portal's I/O object
25169 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25170 + * @token:     Token of DPSECI object
25171 + * @attr:      Returned SEC attributes
25172 + *
25173 + * Return:     '0' on success, error code otherwise
25174 + */
25175 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25176 +                       struct dpseci_sec_attr *attr)
25177 +{
25178 +       struct mc_command cmd = { 0 };
25179 +       struct dpseci_rsp_get_sec_attr *rsp_params;
25180 +       int err;
25181 +
25182 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
25183 +                                         cmd_flags,
25184 +                                         token);
25185 +       err = mc_send_command(mc_io, &cmd);
25186 +       if (err)
25187 +               return err;
25188 +
25189 +       rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
25190 +       attr->ip_id = le16_to_cpu(rsp_params->ip_id);
25191 +       attr->major_rev = rsp_params->major_rev;
25192 +       attr->minor_rev = rsp_params->minor_rev;
25193 +       attr->era = rsp_params->era;
25194 +       attr->deco_num = rsp_params->deco_num;
25195 +       attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
25196 +       attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
25197 +       attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
25198 +       attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
25199 +       attr->crc_acc_num = rsp_params->crc_acc_num;
25200 +       attr->pk_acc_num = rsp_params->pk_acc_num;
25201 +       attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
25202 +       attr->rng_acc_num = rsp_params->rng_acc_num;
25203 +       attr->md_acc_num = rsp_params->md_acc_num;
25204 +       attr->arc4_acc_num = rsp_params->arc4_acc_num;
25205 +       attr->des_acc_num = rsp_params->des_acc_num;
25206 +       attr->aes_acc_num = rsp_params->aes_acc_num;
25207 +
25208 +       return 0;
25209 +}
25210 +
25211 +/**
25212 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
25213 + * @mc_io:     Pointer to MC portal's I/O object
25214 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25215 + * @token:     Token of DPSECI object
25216 + * @counters:  Returned SEC counters
25217 + *
25218 + * Return:     '0' on success, error code otherwise
25219 + */
25220 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25221 +                           struct dpseci_sec_counters *counters)
25222 +{
25223 +       struct mc_command cmd = { 0 };
25224 +       struct dpseci_rsp_get_sec_counters *rsp_params;
25225 +       int err;
25226 +
25227 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
25228 +                                         cmd_flags,
25229 +                                         token);
25230 +       err = mc_send_command(mc_io, &cmd);
25231 +       if (err)
25232 +               return err;
25233 +
25234 +       rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
25235 +       counters->dequeued_requests =
25236 +               le64_to_cpu(rsp_params->dequeued_requests);
25237 +       counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
25238 +       counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
25239 +       counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
25240 +       counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
25241 +       counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
25242 +       counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
25243 +
25244 +       return 0;
25245 +}
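
(Illustrative sketch, not part of the patch: since these counters are global to the SEC block rather than per-DPSECI, a debug dump is the typical consumer.)

	struct dpseci_sec_counters c;
	int err;

	err = dpseci_get_sec_counters(mc_io, 0, token, &c);
	if (!err)
		pr_debug("SEC: %llu dequeued, %llu OB enc, %llu IB dec requests\n",
			 c.dequeued_requests, c.ob_enc_requests,
			 c.ib_dec_requests);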
25246 +
25247 +/**
25248 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
25249 + * @mc_io:     Pointer to MC portal's I/O object
25250 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25251 + * @major_ver: Major version of data path sec API
25252 + * @minor_ver: Minor version of data path sec API
25253 + *
25254 + * Return:     '0' on success, error code otherwise
25255 + */
25256 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25257 +                          u16 *major_ver, u16 *minor_ver)
25258 +{
25259 +       struct mc_command cmd = { 0 };
25260 +       struct dpseci_rsp_get_api_version *rsp_params;
25261 +       int err;
25262 +
25263 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
25264 +                                         cmd_flags, 0);
25265 +       err = mc_send_command(mc_io, &cmd);
25266 +       if (err)
25267 +               return err;
25268 +
25269 +       rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
25270 +       *major_ver = le16_to_cpu(rsp_params->major);
25271 +       *minor_ver = le16_to_cpu(rsp_params->minor);
25272 +
25273 +       return 0;
25274 +}
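
(Illustrative sketch, not part of the patch: a probe routine can compare the firmware's API version against DPSECI_VER_MAJOR/DPSECI_VER_MINOR from dpseci_cmd.h. The warn-on-mismatch policy and 'dev' are assumptions, not upstream behaviour.)

	u16 major, minor;
	int err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;
	if (major != DPSECI_VER_MAJOR)
		dev_warn(dev, "DPSECI API %u.%u, driver built against %u.%u\n",
			 major, minor, DPSECI_VER_MAJOR, DPSECI_VER_MINOR);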
25275 +
25276 +/**
25277 + * dpseci_set_opr() - Set Order Restoration configuration
25278 + * @mc_io:     Pointer to MC portal's I/O object
25279 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25280 + * @token:     Token of DPSECI object
25281 + * @index:     The queue index
25282 + * @options:   Configuration mode options; can be OPR_OPT_CREATE or
25283 + *             OPR_OPT_RETIRE
25284 + * @cfg:       Configuration options for the OPR
25285 + *
25286 + * Return:     '0' on success, error code otherwise
25287 + */
25288 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25289 +                  u8 options, struct opr_cfg *cfg)
25290 +{
25291 +       struct mc_command cmd = { 0 };
25292 +       struct dpseci_cmd_opr *cmd_params;
25293 +
25294 +       cmd.header = mc_encode_cmd_header(
25295 +                       DPSECI_CMDID_SET_OPR,
25296 +                       cmd_flags,
25297 +                       token);
25298 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25299 +       cmd_params->index = index;
25300 +       cmd_params->options = options;
25301 +       cmd_params->oloe = cfg->oloe;
25302 +       cmd_params->oeane = cfg->oeane;
25303 +       cmd_params->olws = cfg->olws;
25304 +       cmd_params->oa = cfg->oa;
25305 +       cmd_params->oprrws = cfg->oprrws;
25306 +
25307 +       return mc_send_command(mc_io, &cmd);
25308 +}
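
(Illustrative sketch, not part of the patch: creating an order point record on queue index 0. 'OPR_OPT_CREATE' comes from the MC order-restoration header; the oprrws value is an arbitrary placeholder, not a recommendation.)

	struct opr_cfg cfg = {
		.oprrws = 3,	/* placeholder restoration-window size code */
	};
	int err;

	err = dpseci_set_opr(mc_io, 0, token, 0, OPR_OPT_CREATE, &cfg);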
25309 +
25310 +/**
25311 + * dpseci_get_opr() - Retrieve Order Restoration config and query
25312 + * @mc_io:     Pointer to MC portal's I/O object
25313 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25314 + * @token:     Token of DPSECI object
25315 + * @index:     The queue index
25316 + * @cfg:       Returned OPR configuration
25317 + * @qry:       Returned OPR query
25318 + *
25319 + * Return:     '0' on success, error code otherwise
25320 + */
25321 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25322 +                  struct opr_cfg *cfg, struct opr_qry *qry)
25323 +{
25324 +       struct mc_command cmd = { 0 };
25325 +       struct dpseci_cmd_opr *cmd_params;
25326 +       struct dpseci_rsp_get_opr *rsp_params;
25327 +       int err;
25328 +
25329 +       cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
25330 +                                         cmd_flags,
25331 +                                         token);
25332 +       cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25333 +       cmd_params->index = index;
25334 +       err = mc_send_command(mc_io, &cmd);
25335 +       if (err)
25336 +               return err;
25337 +
25338 +       rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
25339 +       qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
25340 +       qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
25341 +       cfg->oloe = rsp_params->oloe;
25342 +       cfg->oeane = rsp_params->oeane;
25343 +       cfg->olws = rsp_params->olws;
25344 +       cfg->oa = rsp_params->oa;
25345 +       cfg->oprrws = rsp_params->oprrws;
25346 +       qry->nesn = le16_to_cpu(rsp_params->nesn);
25347 +       qry->ndsn = le16_to_cpu(rsp_params->ndsn);
25348 +       qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
25349 +       qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
25350 +       qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
25351 +       qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
25352 +       qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
25353 +       qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
25354 +       qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
25355 +       qry->opr_id = le16_to_cpu(rsp_params->opr_id);
25356 +
25357 +       return 0;
25358 +}
25359 +
25360 +/**
25361 + * dpseci_set_congestion_notification() - Set congestion group
25362 + *     notification configuration
25363 + * @mc_io:     Pointer to MC portal's I/O object
25364 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25365 + * @token:     Token of DPSECI object
25366 + * @cfg:       congestion notification configuration
25367 + *
25368 + * Return:     '0' on success, error code otherwise
25369 + */
25370 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25371 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg)
25372 +{
25373 +       struct mc_command cmd = { 0 };
25374 +       struct dpseci_cmd_congestion_notification *cmd_params;
25375 +
25376 +       cmd.header = mc_encode_cmd_header(
25377 +                       DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
25378 +                       cmd_flags,
25379 +                       token);
25380 +       cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25381 +       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25382 +       cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
25383 +       cmd_params->priority = cfg->dest_cfg.priority;
25384 +       dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
25385 +                        cfg->dest_cfg.dest_type);
25386 +       dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
25387 +       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
25388 +       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
25389 +       cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
25390 +       cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
25391 +
25392 +       return mc_send_command(mc_io, &cmd);
25393 +}
25394 +
25395 +/**
25396 + * dpseci_get_congestion_notification() - Get congestion group notification
25397 + *     configuration
25398 + * @mc_io:     Pointer to MC portal's I/O object
25399 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25400 + * @token:     Token of DPSECI object
25401 + * @cfg:       congestion notification configuration
25402 + *
25403 + * Return:     '0' on success, error code otherwise
25404 + */
25405 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25406 +       u16 token, struct dpseci_congestion_notification_cfg *cfg)
25407 +{
25408 +       struct mc_command cmd = { 0 };
25409 +       struct dpseci_cmd_congestion_notification *rsp_params;
25410 +       int err;
25411 +
25412 +       cmd.header = mc_encode_cmd_header(
25413 +                       DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
25414 +                       cmd_flags,
25415 +                       token);
25416 +       err = mc_send_command(mc_io, &cmd);
25417 +       if (err)
25418 +               return err;
25419 +
25420 +       rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25421 +       cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
25422 +       cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
25423 +       cfg->dest_cfg.priority = rsp_params->priority;
25424 +       cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
25425 +                                                  CGN_DEST_TYPE);
25426 +       cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
25427 +       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
25428 +       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
25429 +       cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
25430 +       cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
25431 +
25432 +       return 0;
25433 +}
25434 --- /dev/null
25435 +++ b/drivers/crypto/caam/dpseci.h
25436 @@ -0,0 +1,395 @@
25437 +/*
25438 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25439 + * Copyright 2017 NXP
25440 + *
25441 + * Redistribution and use in source and binary forms, with or without
25442 + * modification, are permitted provided that the following conditions are met:
25443 + *     * Redistributions of source code must retain the above copyright
25444 + *      notice, this list of conditions and the following disclaimer.
25445 + *     * Redistributions in binary form must reproduce the above copyright
25446 + *      notice, this list of conditions and the following disclaimer in the
25447 + *      documentation and/or other materials provided with the distribution.
25448 + *     * Neither the names of the above-listed copyright holders nor the
25449 + *      names of any contributors may be used to endorse or promote products
25450 + *      derived from this software without specific prior written permission.
25451 + *
25452 + *
25453 + * ALTERNATIVELY, this software may be distributed under the terms of the
25454 + * GNU General Public License ("GPL") as published by the Free Software
25455 + * Foundation, either version 2 of that License or (at your option) any
25456 + * later version.
25457 + *
25458 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25459 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25460 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25461 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25462 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25463 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25464 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25465 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25466 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25467 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25468 + * POSSIBILITY OF SUCH DAMAGE.
25469 + */
25470 +#ifndef _DPSECI_H_
25471 +#define _DPSECI_H_
25472 +
25473 +/*
25474 + * Data Path SEC Interface API
25475 + * Contains initialization APIs and runtime control APIs for DPSECI
25476 + */
25477 +
25478 +struct fsl_mc_io;
25479 +struct opr_cfg;
25480 +struct opr_qry;
25481 +
25482 +/**
25483 + * General DPSECI macros
25484 + */
25485 +
25486 +/**
25487 + * Maximum number of Tx/Rx priorities per DPSECI object
25488 + */
25489 +#define DPSECI_PRIO_NUM                8
25490 +
25491 +/**
25492 + * All queues considered; see dpseci_set_rx_queue()
25493 + */
25494 +#define DPSECI_ALL_QUEUES      (u8)(-1)
25495 +
25496 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
25497 +               u16 *token);
25498 +
25499 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25500 +
25501 +/**
25502 + * Enable the Congestion Group support
25503 + */
25504 +#define DPSECI_OPT_HAS_CG              0x000020
25505 +
25506 +/**
25507 + * Enable the Order Restoration support
25508 + */
25509 +#define DPSECI_OPT_HAS_OPR             0x000040
25510 +
25511 +/**
25512 + * Order Point Records are shared for the entire DPSECI
25513 + */
25514 +#define DPSECI_OPT_OPR_SHARED          0x000080
25515 +
25516 +/**
25517 + * struct dpseci_cfg - Structure representing DPSECI configuration
25518 + * @options: Any combination of the following options:
25519 + *             DPSECI_OPT_HAS_CG
25520 + *             DPSECI_OPT_HAS_OPR
25521 + *             DPSECI_OPT_OPR_SHARED
25522 + * @num_tx_queues: number of queues towards the SEC
25523 + * @num_rx_queues: number of queues back from the SEC
25524 + * @priorities: Priorities for the SEC hardware processing;
25525 + *             each entry in the array is the priority of the corresponding
25526 + *             Tx queue towards the SEC;
25527 + *             valid priorities are values 1-8
25528 + */
25529 +struct dpseci_cfg {
25530 +       u32 options;
25531 +       u8 num_tx_queues;
25532 +       u8 num_rx_queues;
25533 +       u8 priorities[DPSECI_PRIO_NUM];
25534 +};
25535 +
25536 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25537 +                 const struct dpseci_cfg *cfg, u32 *obj_id);
25538 +
25539 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25540 +                  u32 object_id);
25541 +
25542 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25543 +
25544 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25545 +
25546 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25547 +                     int *en);
25548 +
25549 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25550 +
25551 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25552 +                         u8 irq_index, u8 *en);
25553 +
25554 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25555 +                         u8 irq_index, u8 en);
25556 +
25557 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25558 +                       u8 irq_index, u32 *mask);
25559 +
25560 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25561 +                       u8 irq_index, u32 mask);
25562 +
25563 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25564 +                         u8 irq_index, u32 *status);
25565 +
25566 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25567 +                           u8 irq_index, u32 status);
25568 +
25569 +/**
25570 + * struct dpseci_attr - Structure representing DPSECI attributes
25571 + * @id: DPSECI object ID
25572 + * @num_tx_queues: number of queues towards the SEC
25573 + * @num_rx_queues: number of queues back from the SEC
25574 + * @options: any combination of the following options:
25575 + *             DPSECI_OPT_HAS_CG
25576 + *             DPSECI_OPT_HAS_OPR
25577 + *             DPSECI_OPT_OPR_SHARED
25578 + */
25579 +struct dpseci_attr {
25580 +       int id;
25581 +       u8 num_tx_queues;
25582 +       u8 num_rx_queues;
25583 +       u32 options;
25584 +};
25585 +
25586 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25587 +                         struct dpseci_attr *attr);
25588 +
25589 +/**
25590 + * enum dpseci_dest - DPSECI destination types
25591 + * @DPSECI_DEST_NONE: Unassigned destination; the queue is set in parked mode
25592 + *     and does not generate FQDAN notifications; user is expected to dequeue
25593 + *     from the queue based on polling or other user-defined method
25594 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
25595 + *     notifications to the specified DPIO; user is expected to dequeue from
25596 + *     the queue only after notification is received
25597 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
25598 + *     FQDAN notifications, but is connected to the specified DPCON object;
25599 + *     user is expected to dequeue from the DPCON channel
25600 + */
25601 +enum dpseci_dest {
25602 +       DPSECI_DEST_NONE = 0,
25603 +       DPSECI_DEST_DPIO,
25604 +       DPSECI_DEST_DPCON
25605 +};
25606 +
25607 +/**
25608 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
25609 + * @dest_type: Destination type
25610 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
25611 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
25612 + *     are 0-1 or 0-7, depending on the number of priorities in that channel;
25613 + *     not relevant for 'DPSECI_DEST_NONE' option
25614 + */
25615 +struct dpseci_dest_cfg {
25616 +       enum dpseci_dest dest_type;
25617 +       int dest_id;
25618 +       u8 priority;
25619 +};
25620 +
25621 +/**
25622 + * DPSECI queue modification options
25623 + */
25624 +
25625 +/**
25626 + * Select to modify the user's context associated with the queue
25627 + */
25628 +#define DPSECI_QUEUE_OPT_USER_CTX              0x00000001
25629 +
25630 +/**
25631 + * Select to modify the queue's destination
25632 + */
25633 +#define DPSECI_QUEUE_OPT_DEST                  0x00000002
25634 +
25635 +/**
25636 + * Select to modify the queue's order preservation
25637 + */
25638 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION    0x00000004
25639 +
25640 +/**
25641 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
25642 + * @options: Flags representing the suggested modifications to the queue;
25643 + *     use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
25644 + * @order_preservation_en: order preservation configuration for the Rx queue;
25645 + *     valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
25646 + * @user_ctx: User context value provided in the frame descriptor of each
25647 + *     dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
25648 + *     in 'options'
25649 + * @dest_cfg: Queue destination parameters; valid only if
25650 + *     'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
25651 + */
25652 +struct dpseci_rx_queue_cfg {
25653 +       u32 options;
25654 +       int order_preservation_en;
25655 +       u64 user_ctx;
25656 +       struct dpseci_dest_cfg dest_cfg;
25657 +};
25658 +
25659 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25660 +                       u8 queue, const struct dpseci_rx_queue_cfg *cfg);
25661 +
25662 +/**
25663 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
25664 + * @user_ctx: User context value provided in the frame descriptor of each
25665 + *     dequeued frame
25666 + * @order_preservation_en: Status of the order preservation configuration on the
25667 + *     queue
25668 + * @dest_cfg: Queue destination configuration
25669 + * @fqid: Virtual FQID value to be used for dequeue operations
25670 + */
25671 +struct dpseci_rx_queue_attr {
25672 +       u64 user_ctx;
25673 +       int order_preservation_en;
25674 +       struct dpseci_dest_cfg dest_cfg;
25675 +       u32 fqid;
25676 +};
25677 +
25678 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25679 +                       u8 queue, struct dpseci_rx_queue_attr *attr);
25680 +
25681 +/**
25682 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
25683 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
25684 + * @priority: SEC hardware processing priority for the queue
25685 + */
25686 +struct dpseci_tx_queue_attr {
25687 +       u32 fqid;
25688 +       u8 priority;
25689 +};
25690 +
25691 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25692 +                       u8 queue, struct dpseci_tx_queue_attr *attr);
25693 +
25694 +/**
25695 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
25696 + *     hardware accelerator
25697 + * @ip_id: ID for SEC
25698 + * @major_rev: Major revision number for SEC
25699 + * @minor_rev: Minor revision number for SEC
25700 + * @era: SEC Era
25701 + * @deco_num: The number of copies of the DECO that are implemented in this
25702 + *     version of SEC
25703 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
25704 + *     version of SEC
25705 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
25706 + *     version of SEC
25707 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
25708 + *     implemented in this version of SEC
25709 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
25710 + *     implemented in this version of SEC
25711 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
25712 + *     this version of SEC
25713 + * @pk_acc_num:  The number of copies of the Public Key module that are
25714 + *     implemented in this version of SEC
25715 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
25716 + *     implemented in this version of SEC
25717 + * @rng_acc_num: The number of copies of the Random Number Generator that are
25718 + *     implemented in this version of SEC
25719 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
25720 + *     implemented in this version of SEC
25721 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
25722 + *     in this version of SEC
25723 + * @des_acc_num: The number of copies of the DES module that are implemented in
25724 + *     this version of SEC
25725 + * @aes_acc_num: The number of copies of the AES module that are implemented in
25726 + *     this version of SEC
25727 + **/
25728 +struct dpseci_sec_attr {
25729 +       u16 ip_id;
25730 +       u8 major_rev;
25731 +       u8 minor_rev;
25732 +       u8 era;
25733 +       u8 deco_num;
25734 +       u8 zuc_auth_acc_num;
25735 +       u8 zuc_enc_acc_num;
25736 +       u8 snow_f8_acc_num;
25737 +       u8 snow_f9_acc_num;
25738 +       u8 crc_acc_num;
25739 +       u8 pk_acc_num;
25740 +       u8 kasumi_acc_num;
25741 +       u8 rng_acc_num;
25742 +       u8 md_acc_num;
25743 +       u8 arc4_acc_num;
25744 +       u8 des_acc_num;
25745 +       u8 aes_acc_num;
25746 +};
25747 +
25748 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25749 +                       struct dpseci_sec_attr *attr);
25750 +
25751 +/**
25752 + * struct dpseci_sec_counters - Structure representing global SEC counters
25753 + *                             (not per-DPSECI counters)
25754 + * @dequeued_requests: Number of Requests Dequeued
25755 + * @ob_enc_requests:   Number of Outbound Encrypt Requests
25756 + * @ib_dec_requests:   Number of Inbound Decrypt Requests
25757 + * @ob_enc_bytes:      Number of Outbound Bytes Encrypted
25758 + * @ob_prot_bytes:     Number of Outbound Bytes Protected
25759 + * @ib_dec_bytes:      Number of Inbound Bytes Decrypted
25760 + * @ib_valid_bytes:    Number of Inbound Bytes Validated
25761 + */
25762 +struct dpseci_sec_counters {
25763 +       u64 dequeued_requests;
25764 +       u64 ob_enc_requests;
25765 +       u64 ib_dec_requests;
25766 +       u64 ob_enc_bytes;
25767 +       u64 ob_prot_bytes;
25768 +       u64 ib_dec_bytes;
25769 +       u64 ib_valid_bytes;
25770 +};
25771 +
25772 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25773 +                           struct dpseci_sec_counters *counters);
25774 +
25775 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25776 +                          u16 *major_ver, u16 *minor_ver);
25777 +
25778 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25779 +                  u8 options, struct opr_cfg *cfg);
25780 +
25781 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25782 +                  struct opr_cfg *cfg, struct opr_qry *qry);
25783 +
25784 +/**
25785 + * enum dpseci_congestion_unit - DPSECI congestion units
25786 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
25787 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
25788 + */
25789 +enum dpseci_congestion_unit {
25790 +       DPSECI_CONGESTION_UNIT_BYTES = 0,
25791 +       DPSECI_CONGESTION_UNIT_FRAMES
25792 +};
25793 +
25794 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER             0x00000001
25795 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT              0x00000002
25796 +#define DPSECI_CGN_MODE_COHERENT_WRITE                 0x00000004
25797 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER           0x00000008
25798 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT            0x00000010
25799 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED       0x00000020
25800 +
25801 +/**
25802 + * struct dpseci_congestion_notification_cfg - congestion notification
25803 + *     configuration
25804 + * @units: units type
25805 + * @threshold_entry: above this threshold we enter a congestion state;
25806 + *     set it to '0' to disable it
25807 + * @threshold_exit: below this threshold we exit the congestion state.
25808 + * @message_ctx: The context that will be part of the CSCN message
25809 + * @message_iova: I/O virtual address (must be in DMA-able memory),
25810 + *     must be 16B aligned
25811 + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
25812 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
25813 + *     values
25814 + */
25815 +struct dpseci_congestion_notification_cfg {
25816 +       enum dpseci_congestion_unit units;
25817 +       u32 threshold_entry;
25818 +       u32 threshold_exit;
25819 +       u64 message_ctx;
25820 +       u64 message_iova;
25821 +       struct dpseci_dest_cfg dest_cfg;
25822 +       u16 notification_mode;
25823 +};
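
(Illustrative sketch, not part of the patch: a frame-based CSCN setup using the flags above. 'cscn_iova' stands for a 16B-aligned, DMA-mapped buffer; the thresholds are arbitrary.)

	struct dpseci_congestion_notification_cfg cn_cfg = {
		.units = DPSECI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 1024,
		.threshold_exit = 256,
		.message_iova = cscn_iova,
		.message_ctx = 0,
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
	};
	int err;

	err = dpseci_set_congestion_notification(mc_io, 0, token, &cn_cfg);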
25824 +
25825 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25826 +       u16 token, const struct dpseci_congestion_notification_cfg *cfg);
25827 +
25828 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25829 +       u16 token, struct dpseci_congestion_notification_cfg *cfg);
25830 +
25831 +#endif /* _DPSECI_H_ */
25832 --- /dev/null
25833 +++ b/drivers/crypto/caam/dpseci_cmd.h
25834 @@ -0,0 +1,261 @@
25835 +/*
25836 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25837 + * Copyright 2017 NXP
25838 + *
25839 + * Redistribution and use in source and binary forms, with or without
25840 + * modification, are permitted provided that the following conditions are met:
25841 + *     * Redistributions of source code must retain the above copyright
25842 + *      notice, this list of conditions and the following disclaimer.
25843 + *     * Redistributions in binary form must reproduce the above copyright
25844 + *      notice, this list of conditions and the following disclaimer in the
25845 + *      documentation and/or other materials provided with the distribution.
25846 + *     * Neither the names of the above-listed copyright holders nor the
25847 + *      names of any contributors may be used to endorse or promote products
25848 + *      derived from this software without specific prior written permission.
25849 + *
25850 + *
25851 + * ALTERNATIVELY, this software may be distributed under the terms of the
25852 + * GNU General Public License ("GPL") as published by the Free Software
25853 + * Foundation, either version 2 of that License or (at your option) any
25854 + * later version.
25855 + *
25856 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25857 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25858 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25859 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25860 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25861 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25862 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25863 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25864 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25865 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25866 + * POSSIBILITY OF SUCH DAMAGE.
25867 + */
25868 +
25869 +#ifndef _DPSECI_CMD_H_
25870 +#define _DPSECI_CMD_H_
25871 +
25872 +/* DPSECI Version */
25873 +#define DPSECI_VER_MAJOR                               5
25874 +#define DPSECI_VER_MINOR                               1
25875 +
25876 +#define DPSECI_VER(maj, min)   (((maj) << 16) | (min))
25877 +#define DPSECI_VERSION         DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
25878 +
25879 +/* Command IDs */
25880 +
25881 +#define DPSECI_CMDID_CLOSE                              0x8001
25882 +#define DPSECI_CMDID_OPEN                               0x8091
25883 +#define DPSECI_CMDID_CREATE                             0x9092
25884 +#define DPSECI_CMDID_DESTROY                            0x9891
25885 +#define DPSECI_CMDID_GET_API_VERSION                    0xa091
25886 +
25887 +#define DPSECI_CMDID_ENABLE                             0x0021
25888 +#define DPSECI_CMDID_DISABLE                            0x0031
25889 +#define DPSECI_CMDID_GET_ATTR                           0x0041
25890 +#define DPSECI_CMDID_RESET                              0x0051
25891 +#define DPSECI_CMDID_IS_ENABLED                         0x0061
25892 +
25893 +#define DPSECI_CMDID_SET_IRQ_ENABLE                     0x0121
25894 +#define DPSECI_CMDID_GET_IRQ_ENABLE                     0x0131
25895 +#define DPSECI_CMDID_SET_IRQ_MASK                       0x0141
25896 +#define DPSECI_CMDID_GET_IRQ_MASK                       0x0151
25897 +#define DPSECI_CMDID_GET_IRQ_STATUS                     0x0161
25898 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS                   0x0171
25899 +
25900 +#define DPSECI_CMDID_SET_RX_QUEUE                       0x1941
25901 +#define DPSECI_CMDID_GET_RX_QUEUE                       0x1961
25902 +#define DPSECI_CMDID_GET_TX_QUEUE                       0x1971
25903 +#define DPSECI_CMDID_GET_SEC_ATTR                       0x1981
25904 +#define DPSECI_CMDID_GET_SEC_COUNTERS                   0x1991
25905 +#define DPSECI_CMDID_SET_OPR                           0x19A1
25906 +#define DPSECI_CMDID_GET_OPR                           0x19B1
25907 +
25908 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION       0x1701
25909 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION       0x1711
25910 +
25911 +/* Macros for accessing command fields smaller than 1 byte */
25912 +#define DPSECI_MASK(field)     \
25913 +       GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1,     \
25914 +               DPSECI_##field##_SHIFT)
25915 +
25916 +#define dpseci_set_field(var, field, val)      \
25917 +       ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
25918 +
25919 +#define dpseci_get_field(var, field)   \
25920 +       (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
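
/*
 * Worked example (annotation, not part of the original header): for a field
 * with DPSECI_<field>_SHIFT = 4 and DPSECI_<field>_SIZE = 2, DPSECI_MASK()
 * expands to GENMASK(5, 4) = 0x30, so dpseci_get_field(var, field) evaluates
 * to ((var) & 0x30) >> 4 and dpseci_set_field(var, field, 2) ORs in
 * ((2 << 4) & 0x30) = 0x20.
 */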
25921 +
25922 +struct dpseci_cmd_open {
25923 +       __le32 dpseci_id;
25924 +};
25925 +
25926 +struct dpseci_cmd_create {
25927 +       u8 priorities[8];
25928 +       u8 num_tx_queues;
25929 +       u8 num_rx_queues;
25930 +       __le16 pad;
25931 +       __le32 options;
25932 +};
25933 +
25934 +struct dpseci_cmd_destroy {
25935 +       __le32 object_id;
25936 +};
25937 +
25938 +struct dpseci_rsp_is_enabled {
25939 +       __le32 is_enabled;
25940 +};
25941 +
25942 +struct dpseci_cmd_irq_enable {
25943 +       u8 enable_state;
25944 +       u8 pad[3];
25945 +       u8 irq_index;
25946 +};
25947 +
25948 +struct dpseci_rsp_get_irq_enable {
25949 +       u8 enable_state;
25950 +};
25951 +
25952 +struct dpseci_cmd_irq_mask {
25953 +       __le32 mask;
25954 +       u8 irq_index;
25955 +};
25956 +
25957 +struct dpseci_cmd_irq_status {
25958 +       __le32 status;
25959 +       u8 irq_index;
25960 +};
25961 +
25962 +struct dpseci_rsp_get_attributes {
25963 +       __le32 id;
25964 +       __le32 pad0;
25965 +       u8 num_tx_queues;
25966 +       u8 num_rx_queues;
25967 +       u8 pad1[6];
25968 +       __le32 options;
25969 +};
25970 +
25971 +struct dpseci_cmd_queue {
25972 +       __le32 dest_id;
25973 +       u8 priority;
25974 +       u8 queue;
25975 +       u8 dest_type;
25976 +       u8 pad;
25977 +       __le64 user_ctx;
25978 +       union {
25979 +               __le32 options;
25980 +               __le32 fqid;
25981 +       };
25982 +       __le32 order_preservation_en;
25983 +};
25984 +
25985 +struct dpseci_rsp_get_tx_queue {
25986 +       __le32 pad;
25987 +       __le32 fqid;
25988 +       u8 priority;
25989 +};
25990 +
25991 +struct dpseci_rsp_get_sec_attr {
25992 +       __le16 ip_id;
25993 +       u8 major_rev;
25994 +       u8 minor_rev;
25995 +       u8 era;
25996 +       u8 pad0[3];
25997 +       u8 deco_num;
25998 +       u8 zuc_auth_acc_num;
25999 +       u8 zuc_enc_acc_num;
26000 +       u8 pad1;
26001 +       u8 snow_f8_acc_num;
26002 +       u8 snow_f9_acc_num;
26003 +       u8 crc_acc_num;
26004 +       u8 pad2;
26005 +       u8 pk_acc_num;
26006 +       u8 kasumi_acc_num;
26007 +       u8 rng_acc_num;
26008 +       u8 pad3;
26009 +       u8 md_acc_num;
26010 +       u8 arc4_acc_num;
26011 +       u8 des_acc_num;
26012 +       u8 aes_acc_num;
26013 +};
26014 +
26015 +struct dpseci_rsp_get_sec_counters {
26016 +       __le64 dequeued_requests;
26017 +       __le64 ob_enc_requests;
26018 +       __le64 ib_dec_requests;
26019 +       __le64 ob_enc_bytes;
26020 +       __le64 ob_prot_bytes;
26021 +       __le64 ib_dec_bytes;
26022 +       __le64 ib_valid_bytes;
26023 +};
26024 +
26025 +struct dpseci_rsp_get_api_version {
26026 +       __le16 major;
26027 +       __le16 minor;
26028 +};
26029 +
26030 +struct dpseci_cmd_opr {
26031 +       __le16 pad;
26032 +       u8 index;
26033 +       u8 options;
26034 +       u8 pad1[7];
26035 +       u8 oloe;
26036 +       u8 oeane;
26037 +       u8 olws;
26038 +       u8 oa;
26039 +       u8 oprrws;
26040 +};
26041 +
26042 +#define DPSECI_OPR_RIP_SHIFT           0
26043 +#define DPSECI_OPR_RIP_SIZE            1
26044 +#define DPSECI_OPR_ENABLE_SHIFT                1
26045 +#define DPSECI_OPR_ENABLE_SIZE         1
26046 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT     1
26047 +#define DPSECI_OPR_TSEQ_NLIS_SIZE      1
26048 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT     1
26049 +#define DPSECI_OPR_HSEQ_NLIS_SIZE      1
26050 +
26051 +struct dpseci_rsp_get_opr {
26052 +       __le64 pad;
26053 +       u8 rip_enable;
26054 +       u8 pad0[2];
26055 +       u8 oloe;
26056 +       u8 oeane;
26057 +       u8 olws;
26058 +       u8 oa;
26059 +       u8 oprrws;
26060 +       __le16 nesn;
26061 +       __le16 pad1;
26062 +       __le16 ndsn;
26063 +       __le16 pad2;
26064 +       __le16 ea_tseq;
26065 +       u8 tseq_nlis;
26066 +       u8 pad3;
26067 +       __le16 ea_hseq;
26068 +       u8 hseq_nlis;
26069 +       u8 pad4;
26070 +       __le16 ea_hptr;
26071 +       __le16 pad5;
26072 +       __le16 ea_tptr;
26073 +       __le16 pad6;
26074 +       __le16 opr_vid;
26075 +       __le16 pad7;
26076 +       __le16 opr_id;
26077 +};
26078 +
26079 +#define DPSECI_CGN_DEST_TYPE_SHIFT     0
26080 +#define DPSECI_CGN_DEST_TYPE_SIZE      4
26081 +#define DPSECI_CGN_UNITS_SHIFT         4
26082 +#define DPSECI_CGN_UNITS_SIZE          2
26083 +
26084 +struct dpseci_cmd_congestion_notification {
26085 +       __le32 dest_id;
26086 +       __le16 notification_mode;
26087 +       u8 priority;
26088 +       u8 options;
26089 +       __le64 message_iova;
26090 +       __le64 message_ctx;
26091 +       __le32 threshold_entry;
26092 +       __le32 threshold_exit;
26093 +};
26094 +
26095 +#endif /* _DPSECI_CMD_H_ */
26096 --- a/drivers/crypto/caam/error.c
26097 +++ b/drivers/crypto/caam/error.c
26098 @@ -6,11 +6,54 @@
26099  
26100  #include "compat.h"
26101  #include "regs.h"
26102 -#include "intern.h"
26103  #include "desc.h"
26104 -#include "jr.h"
26105  #include "error.h"
26106  
26107 +#ifdef DEBUG
26108 +
26109 +#include <linux/highmem.h>
26110 +
26111 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26112 +                 int rowsize, int groupsize, struct scatterlist *sg,
26113 +                 size_t tlen, bool ascii)
26114 +{
26115 +       struct scatterlist *it;
26116 +       void *it_page;
26117 +       size_t len;
26118 +       void *buf;
26119 +
26120 +       for (it = sg; it && tlen > 0; it = sg_next(it)) {
26121 +               /*
26122 +                * make sure the scatterlist's page
26123 +                * has a valid virtual memory mapping
26124 +                */
26125 +               it_page = kmap_atomic(sg_page(it));
26126 +               if (unlikely(!it_page)) {
26127 +                       pr_err("caam_dump_sg: kmap failed\n");
26128 +                       return;
26129 +               }
26130 +
26131 +               buf = it_page + it->offset;
26132 +               len = min_t(size_t, tlen, it->length);
26133 +               print_hex_dump(level, prefix_str, prefix_type, rowsize,
26134 +                              groupsize, buf, len, ascii);
26135 +               tlen -= len;
26136 +
26137 +               kunmap_atomic(it_page);
26138 +       }
26139 +}
26140 +
26141 +#else
26142 +
26143 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26144 +                 int rowsize, int groupsize, struct scatterlist *sg,
26145 +                 size_t tlen, bool ascii)
26146 +{}
26147 +
26148 +#endif
26149 +
26150 +EXPORT_SYMBOL(caam_dump_sg);
26151 +
26152  static const struct {
26153         u8 value;
26154         const char *error_text;
26155 @@ -69,6 +112,54 @@ static const struct {
26156         { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
26157  };
26158  
26159 +static const struct {
26160 +       u8 value;
26161 +       const char *error_text;
26162 +} qi_error_list[] = {
26163 +       { 0x1F, "Job terminated by FQ or ICID flush" },
26164 +       { 0x20, "FD format error"},
26165 +       { 0x21, "FD command format error"},
26166 +       { 0x23, "FL format error"},
26167 +       { 0x25, "CRJD specified in FD, but not enabled in FLC"},
26168 +       { 0x30, "Max. buffer size too small"},
26169 +       { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
26170 +       { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
26171 +       { 0x33, "Size over/underflow (allocate mode)"},
26172 +       { 0x34, "Size over/underflow (reuse mode)"},
26173 +       { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
26174 +       { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
26175 +       { 0x41, "SBC frame format not supported (allocate mode)"},
26176 +       { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
26177 +       { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
26178 +       { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
26179 +       { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
26180 +       { 0x46, "Annotation length exceeds offset (reuse mode)"},
26181 +       { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
26182 +       { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
26183 +       { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
26184 +       { 0x51, "Unsupported IF reuse mode"},
26185 +       { 0x52, "Unsupported FL use mode"},
26186 +       { 0x53, "Unsupported RJD use mode"},
26187 +       { 0x54, "Unsupported inline descriptor use mode"},
26188 +       { 0xC0, "Table buffer pool 0 depletion"},
26189 +       { 0xC1, "Table buffer pool 1 depletion"},
26190 +       { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
26191 +       { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
26192 +       { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
26193 +       { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
26194 +       { 0xD0, "FLC read error"},
26195 +       { 0xD1, "FL read error"},
26196 +       { 0xD2, "FL write error"},
26197 +       { 0xD3, "OF SGT write error"},
26198 +       { 0xD4, "PTA read error"},
26199 +       { 0xD5, "PTA write error"},
26200 +       { 0xD6, "OF SGT F-bit write error"},
26201 +       { 0xD7, "ASA write error"},
26202 +       { 0xE1, "FLC[ICR]=0 ICID error"},
26203 +       { 0xE2, "FLC[ICR]=1 ICID error"},
26204 +       { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
26205 +};
26206 +
26207  static const char * const cha_id_list[] = {
26208         "",
26209         "AES",
26210 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
26211             strlen(rng_err_id_list[err_id])) {
26212                 /* RNG-only error */
26213                 err_str = rng_err_id_list[err_id];
26214 -       } else if (err_id < ARRAY_SIZE(err_id_list))
26215 +       } else {
26216                 err_str = err_id_list[err_id];
26217 -       else
26218 -               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26219 +       }
26220  
26221         /*
26222          * CCB ICV check failures are part of normal operation life;
26223 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
26224                 status, error, idx_str, idx, err_str, err_err_code);
26225  }
26226  
26227 +static void report_qi_status(struct device *qidev, const u32 status,
26228 +                            const char *error)
26229 +{
26230 +       u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
26231 +       const char *err_str = "unidentified error value 0x";
26232 +       char err_err_code[3] = { 0 };
26233 +       int i;
26234 +
26235 +       for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
26236 +               if (qi_error_list[i].value == err_id)
26237 +                       break;
26238 +
26239 +       if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
26240 +               err_str = qi_error_list[i].error_text;
26241 +       else
26242 +               snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26243 +
26244 +       dev_err(qidev, "%08x: %s: %s%s\n",
26245 +               status, error, err_str, err_err_code);
26246 +}
26247 +
26248  static void report_jr_status(struct device *jrdev, const u32 status,
26249                              const char *error)
26250  {
26251 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
26252                 status, error, __func__);
26253  }
26254  
26255 -void caam_jr_strstatus(struct device *jrdev, u32 status)
26256 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
26257  {
26258         static const struct stat_src {
26259                 void (*report_ssed)(struct device *jrdev, const u32 status,
26260 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
26261                 { report_ccb_status, "CCB" },
26262                 { report_jump_status, "Jump" },
26263                 { report_deco_status, "DECO" },
26264 -               { NULL, "Queue Manager Interface" },
26265 +               { report_qi_status, "Queue Manager Interface" },
26266                 { report_jr_status, "Job Ring" },
26267                 { report_cond_code_status, "Condition Code" },
26268                 { NULL, NULL },
26269 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
26270         else
26271                 dev_err(jrdev, "%d: unknown error source\n", ssrc);
26272  }
26273 -EXPORT_SYMBOL(caam_jr_strstatus);
26274 +EXPORT_SYMBOL(caam_strstatus);
26275 --- a/drivers/crypto/caam/error.h
26276 +++ b/drivers/crypto/caam/error.h
26277 @@ -7,5 +7,13 @@
26278  #ifndef CAAM_ERROR_H
26279  #define CAAM_ERROR_H
26280  #define CAAM_ERROR_STR_MAX 302
26281 -void caam_jr_strstatus(struct device *jrdev, u32 status);
26282 +
26283 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
26284 +
26285 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
26286 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
26287 +
26288 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26289 +                 int rowsize, int groupsize, struct scatterlist *sg,
26290 +                 size_t tlen, bool ascii);
26291  #endif /* CAAM_ERROR_H */
26292 --- a/drivers/crypto/caam/intern.h
26293 +++ b/drivers/crypto/caam/intern.h
26294 @@ -64,10 +64,9 @@ struct caam_drv_private_jr {
26295   * Driver-private storage for a single CAAM block instance
26296   */
26297  struct caam_drv_private {
26298 -
26299 -       struct device *dev;
26300 -       struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
26301 -       struct platform_device *pdev;
26302 +#ifdef CONFIG_CAAM_QI
26303 +       struct device *qidev;
26304 +#endif
26305  
26306         /* Physical-presence section */
26307         struct caam_ctrl __iomem *ctrl; /* controller region */
26308 @@ -84,6 +83,7 @@ struct caam_drv_private {
26309         u8 qi_present;          /* Nonzero if QI present in device */
26310         int secvio_irq;         /* Security violation interrupt number */
26311         int virt_en;            /* Virtualization enabled in CAAM */
26312 +       int era;                /* CAAM Era (internal HW revision) */
26313  
26314  #define        RNG4_MAX_HANDLES 2
26315         /* RNG4 block */
26316 @@ -103,11 +103,6 @@ struct caam_drv_private {
26317  #ifdef CONFIG_DEBUG_FS
26318         struct dentry *dfs_root;
26319         struct dentry *ctl; /* controller dir */
26320 -       struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
26321 -       struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
26322 -       struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
26323 -       struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
26324 -
26325         struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
26326         struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
26327  #endif
26328 @@ -115,4 +110,22 @@ struct caam_drv_private {
26329  
26330  void caam_jr_algapi_init(struct device *dev);
26331  void caam_jr_algapi_remove(struct device *dev);
26332 +
26333 +#ifdef CONFIG_DEBUG_FS
26334 +static int caam_debugfs_u64_get(void *data, u64 *val)
26335 +{
26336 +       *val = caam64_to_cpu(*(u64 *)data);
26337 +       return 0;
26338 +}
26339 +
26340 +static int caam_debugfs_u32_get(void *data, u64 *val)
26341 +{
26342 +       *val = caam32_to_cpu(*(u32 *)data);
26343 +       return 0;
26344 +}
26345 +
26346 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
26347 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
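/*
 * Usage sketch (editor's note, hypothetical caller and field names): with
 * these read-only fops, a 64-bit performance-monitor register can be
 * exposed under the controller's debugfs directory, e.g.
 *
 *	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
 *			    ctrlpriv->ctl, &perfmon->ob_enc_req,
 *			    &caam_fops_u64_ro);
 */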
26348 +#endif
26349 +
26350  #endif /* INTERN_H */
26351 --- a/drivers/crypto/caam/jr.c
26352 +++ b/drivers/crypto/caam/jr.c
26353 @@ -9,6 +9,7 @@
26354  #include <linux/of_address.h>
26355  
26356  #include "compat.h"
26357 +#include "ctrl.h"
26358  #include "regs.h"
26359  #include "jr.h"
26360  #include "desc.h"
26361 @@ -22,6 +23,14 @@ struct jr_driver_data {
26362  
26363  static struct jr_driver_data driver_data;
26364  
26365 +static int jr_driver_probed;
26366 +
26367 +int caam_jr_driver_probed(void)
26368 +{
26369 +       return jr_driver_probed;
26370 +}
26371 +EXPORT_SYMBOL(caam_jr_driver_probed);
26372 +
26373  static int caam_reset_hw_jr(struct device *dev)
26374  {
26375         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
26376 @@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
26377                 dev_err(jrdev, "Failed to shut down job ring\n");
26378         irq_dispose_mapping(jrpriv->irq);
26379  
26380 +       jr_driver_probed--;
26381 +
26382         return ret;
26383  }
26384  
26385 @@ -280,6 +291,36 @@ struct device *caam_jr_alloc(void)
26386  EXPORT_SYMBOL(caam_jr_alloc);
26387  
26388  /**
26389 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
26390 + *
26391 + * returns :  pointer to the Job Ring device with the requested index,
26392 + *           or ERR_PTR(-ENODEV) if no such ring has been probed.
26393 + **/
26394 +struct device *caam_jridx_alloc(int idx)
26395 +{
26396 +       struct caam_drv_private_jr *jrpriv;
26397 +       struct device *dev = ERR_PTR(-ENODEV);
26398 +
26399 +       spin_lock(&driver_data.jr_alloc_lock);
26400 +
26401 +       if (list_empty(&driver_data.jr_list))
26402 +               goto end;
26403 +
26404 +       list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
26405 +               if (jrpriv->ridx == idx) {
26406 +                       atomic_inc(&jrpriv->tfm_count);
26407 +                       dev = jrpriv->dev;
26408 +                       break;
26409 +               }
26410 +       }
26411 +
26412 +end:
26413 +       spin_unlock(&driver_data.jr_alloc_lock);
26414 +       return dev;
26415 +}
26416 +EXPORT_SYMBOL(caam_jridx_alloc);
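/*
 * Usage sketch (editor's note, hypothetical caller): a client that must
 * pin its work to job ring 0 could do
 *
 *	struct device *jrdev = caam_jridx_alloc(0);
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);
 *	...
 *	caam_jr_free(jrdev);
 */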
26417 +
26418 +/**
26419   * caam_jr_free() - Free the Job Ring
26420   * @rdev     - points to the dev that identifies the Job ring to
26421   *             be released.
26422 @@ -496,15 +537,28 @@ static int caam_jr_probe(struct platform
26423                 return -ENOMEM;
26424         }
26425  
26426 -       jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
26427 +       jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
26428  
26429 -       if (sizeof(dma_addr_t) == sizeof(u64))
26430 -               if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
26431 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
26432 +       if (sizeof(dma_addr_t) == sizeof(u64)) {
26433 +               if (caam_dpaa2)
26434 +                       error = dma_set_mask_and_coherent(jrdev,
26435 +                                                         DMA_BIT_MASK(49));
26436 +               else if (of_device_is_compatible(nprop,
26437 +                                                "fsl,sec-v5.0-job-ring"))
26438 +                       error = dma_set_mask_and_coherent(jrdev,
26439 +                                                         DMA_BIT_MASK(40));
26440                 else
26441 -                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
26442 -       else
26443 -               dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26444 +                       error = dma_set_mask_and_coherent(jrdev,
26445 +                                                         DMA_BIT_MASK(36));
26446 +       } else {
26447 +               error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26448 +       }
26449 +       if (error) {
26450 +               dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
26451 +                       error);
26452 +               iounmap(ctrl);
26453 +               return error;
26454 +       }
26455  
26456         /* Identify the interrupt */
26457         jrpriv->irq = irq_of_parse_and_map(nprop, 0);
26458 @@ -524,10 +578,12 @@ static int caam_jr_probe(struct platform
26459  
26460         atomic_set(&jrpriv->tfm_count, 0);
26461  
26462 +       jr_driver_probed++;
26463 +
26464         return 0;
26465  }
26466  
26467 -static struct of_device_id caam_jr_match[] = {
26468 +static const struct of_device_id caam_jr_match[] = {
26469         {
26470                 .compatible = "fsl,sec-v4.0-job-ring",
26471         },
26472 --- a/drivers/crypto/caam/jr.h
26473 +++ b/drivers/crypto/caam/jr.h
26474 @@ -8,7 +8,9 @@
26475  #define JR_H
26476  
26477  /* Prototypes for backend-level services exposed to APIs */
26478 +int caam_jr_driver_probed(void);
26479  struct device *caam_jr_alloc(void);
26480 +struct device *caam_jridx_alloc(int idx);
26481  void caam_jr_free(struct device *rdev);
26482  int caam_jr_enqueue(struct device *dev, u32 *desc,
26483                     void (*cbk)(struct device *dev, u32 *desc, u32 status,
26484 --- a/drivers/crypto/caam/key_gen.c
26485 +++ b/drivers/crypto/caam/key_gen.c
26486 @@ -41,15 +41,29 @@ Split key generation--------------------
26487  [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
26488                         @0xffe04000
26489  */
26490 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26491 -                 int split_key_pad_len, const u8 *key_in, u32 keylen,
26492 -                 u32 alg_op)
26493 +int gen_split_key(struct device *jrdev, u8 *key_out,
26494 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
26495 +                 int max_keylen)
26496  {
26497         u32 *desc;
26498         struct split_key_result result;
26499         dma_addr_t dma_addr_in, dma_addr_out;
26500         int ret = -ENOMEM;
26501  
26502 +       adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
26503 +       adata->keylen_pad = split_key_pad_len(adata->algtype &
26504 +                                             OP_ALG_ALGSEL_MASK);
26505 +
26506 +#ifdef DEBUG
26507 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
26508 +               adata->keylen, adata->keylen_pad);
26509 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
26510 +                      DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
26511 +#endif
26512 +
26513 +       if (adata->keylen_pad > max_keylen)
26514 +               return -EINVAL;
26515 +
26516         desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
26517         if (!desc) {
26518                 dev_err(jrdev, "unable to allocate key input memory\n");
26519 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
26520                 goto out_free;
26521         }
26522  
26523 -       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
26524 +       dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
26525                                       DMA_FROM_DEVICE);
26526         if (dma_mapping_error(jrdev, dma_addr_out)) {
26527                 dev_err(jrdev, "unable to map key output memory\n");
26528 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
26529         append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
26530  
26531         /* Sets MDHA up into an HMAC-INIT */
26532 -       append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
26533 +       append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
26534 +                        OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
26535 +                        OP_ALG_AS_INIT);
26536  
26537         /*
26538          * do a FIFO_LOAD of zero, this will trigger the internal key expansion
26539 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
26540          * FIFO_STORE with the explicit split-key content store
26541          * (0x26 output type)
26542          */
26543 -       append_fifo_store(desc, dma_addr_out, split_key_len,
26544 +       append_fifo_store(desc, dma_addr_out, adata->keylen,
26545                           LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
26546  
26547  #ifdef DEBUG
26548 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
26549  #ifdef DEBUG
26550                 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
26551                                DUMP_PREFIX_ADDRESS, 16, 4, key_out,
26552 -                              split_key_pad_len, 1);
26553 +                              adata->keylen_pad, 1);
26554  #endif
26555         }
26556  
26557 -       dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
26558 +       dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
26559                          DMA_FROM_DEVICE);
26560  out_unmap_in:
26561         dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
26562 --- a/drivers/crypto/caam/key_gen.h
26563 +++ b/drivers/crypto/caam/key_gen.h
26564 @@ -5,6 +5,36 @@
26565   *
26566   */
26567  
26568 +/**
26569 + * split_key_len - Compute MDHA split key length for a given algorithm
26570 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26571 + *        SHA224, SHA384, SHA512.
26572 + *
26573 + * Return: MDHA split key length
26574 + */
26575 +static inline u32 split_key_len(u32 hash)
26576 +{
26577 +       /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
26578 +       static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
26579 +       u32 idx;
26580 +
26581 +       idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
26582 +
26583 +       return (u32)(mdpadlen[idx] * 2);
26584 +}
26585 +
26586 +/**
26587 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
26588 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26589 + *        SHA224, SHA384, SHA512.
26590 + *
26591 + * Return: MDHA split key pad length
26592 + */
26593 +static inline u32 split_key_pad_len(u32 hash)
26594 +{
26595 +       return ALIGN(split_key_len(hash), 16);
26596 +}
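/*
 * Worked example (editor's note, assuming the usual OP_ALG_ALGSEL_*
 * encoding where SHA1's sub-index is 1): for HMAC-SHA1,
 * split_key_len() = mdpadlen[1] * 2 = 40 bytes (the ipad and opad halves),
 * and split_key_pad_len() = ALIGN(40, 16) = 48 bytes.
 */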
26597 +
26598  struct split_key_result {
26599         struct completion completion;
26600         int err;
26601 @@ -12,6 +42,6 @@ struct split_key_result {
26602  
26603  void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
26604  
26605 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26606 -                   int split_key_pad_len, const u8 *key_in, u32 keylen,
26607 -                   u32 alg_op);
26608 +int gen_split_key(struct device *jrdev, u8 *key_out,
26609 +                 struct alginfo * const adata, const u8 *key_in, u32 keylen,
26610 +                 int max_keylen);
26611 --- a/drivers/crypto/caam/pdb.h
26612 +++ b/drivers/crypto/caam/pdb.h
26613 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
26614  #define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
26615  #define RSA_PDB_D_SHIFT         12
26616  #define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
26617 +#define RSA_PDB_Q_SHIFT         12
26618 +#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
26619  
26620  #define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
26621  #define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
26622 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
26623  #define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
26624  
26625  #define RSA_PRIV_KEY_FRM_1      0
26626 +#define RSA_PRIV_KEY_FRM_2      1
26627 +#define RSA_PRIV_KEY_FRM_3      2
26628  
26629  /**
26630   * RSA Encrypt Protocol Data Block
26631 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
26632         dma_addr_t      d_dma;
26633  } __packed;
26634  
26635 +/**
26636 + * RSA Decrypt PDB - Private Key Form #2
26637 + * @sgf     : scatter-gather field
26638 + * @g_dma   : dma address of encrypted input data
26639 + * @f_dma   : dma address of output data
26640 + * @d_dma   : dma address of RSA private exponent
26641 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
26642 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
26643 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26644 + *            as internal state buffer. It is assumed to be as long as p.
26645 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26646 + *            as internal state buffer. It is assumed to be as long as q.
26647 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26648 + */
26649 +struct rsa_priv_f2_pdb {
26650 +       u32             sgf;
26651 +       dma_addr_t      g_dma;
26652 +       dma_addr_t      f_dma;
26653 +       dma_addr_t      d_dma;
26654 +       dma_addr_t      p_dma;
26655 +       dma_addr_t      q_dma;
26656 +       dma_addr_t      tmp1_dma;
26657 +       dma_addr_t      tmp2_dma;
26658 +       u32             p_q_len;
26659 +} __packed;
26660 +
26661 +/**
26662 + * RSA Decrypt PDB - Private Key Form #3
26663 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
26664 + * the RSA modulus.
26665 + * @sgf     : scatter-gather field
26666 + * @g_dma   : dma address of encrypted input data
26667 + * @f_dma   : dma address of output data
26668 + * @c_dma   : dma address of RSA CRT coefficient
26669 + * @p_dma   : dma address of RSA prime factor p of RSA modulus n
26670 + * @q_dma   : dma address of RSA prime factor q of RSA modulus n
26671 + * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
26672 + * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
26673 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26674 + *            as internal state buffer. It is assumed to be as long as p.
26675 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26676 + *            as internal state buffer. It is assumed to be as long as q.
26677 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26678 + */
26679 +struct rsa_priv_f3_pdb {
26680 +       u32             sgf;
26681 +       dma_addr_t      g_dma;
26682 +       dma_addr_t      f_dma;
26683 +       dma_addr_t      c_dma;
26684 +       dma_addr_t      p_dma;
26685 +       dma_addr_t      q_dma;
26686 +       dma_addr_t      dp_dma;
26687 +       dma_addr_t      dq_dma;
26688 +       dma_addr_t      tmp1_dma;
26689 +       dma_addr_t      tmp2_dma;
26690 +       u32             p_q_len;
26691 +} __packed;
26692 +
26693  #endif
26694 --- a/drivers/crypto/caam/pkc_desc.c
26695 +++ b/drivers/crypto/caam/pkc_desc.c
26696 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
26697         append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26698                          RSA_PRIV_KEY_FRM_1);
26699  }
26700 +
26701 +/* Descriptor for RSA Private operation - Private Key Form #2 */
26702 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
26703 +{
26704 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
26705 +       append_cmd(desc, pdb->sgf);
26706 +       append_ptr(desc, pdb->g_dma);
26707 +       append_ptr(desc, pdb->f_dma);
26708 +       append_ptr(desc, pdb->d_dma);
26709 +       append_ptr(desc, pdb->p_dma);
26710 +       append_ptr(desc, pdb->q_dma);
26711 +       append_ptr(desc, pdb->tmp1_dma);
26712 +       append_ptr(desc, pdb->tmp2_dma);
26713 +       append_cmd(desc, pdb->p_q_len);
26714 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26715 +                        RSA_PRIV_KEY_FRM_2);
26716 +}
26717 +
26718 +/* Descriptor for RSA Private operation - Private Key Form #3 */
26719 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
26720 +{
26721 +       init_job_desc_pdb(desc, 0, sizeof(*pdb));
26722 +       append_cmd(desc, pdb->sgf);
26723 +       append_ptr(desc, pdb->g_dma);
26724 +       append_ptr(desc, pdb->f_dma);
26725 +       append_ptr(desc, pdb->c_dma);
26726 +       append_ptr(desc, pdb->p_dma);
26727 +       append_ptr(desc, pdb->q_dma);
26728 +       append_ptr(desc, pdb->dp_dma);
26729 +       append_ptr(desc, pdb->dq_dma);
26730 +       append_ptr(desc, pdb->tmp1_dma);
26731 +       append_ptr(desc, pdb->tmp2_dma);
26732 +       append_cmd(desc, pdb->p_q_len);
26733 +       append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26734 +                        RSA_PRIV_KEY_FRM_3);
26735 +}
26736 --- /dev/null
26737 +++ b/drivers/crypto/caam/qi.c
26738 @@ -0,0 +1,797 @@
26739 +/*
26740 + * CAAM/SEC 4.x QI transport/backend driver
26741 + * Queue Interface backend functionality
26742 + *
26743 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
26744 + * Copyright 2016-2017 NXP
26745 + */
26746 +
26747 +#include <linux/cpumask.h>
26748 +#include <linux/kthread.h>
26749 +#include <linux/fsl_qman.h>
26750 +
26751 +#include "regs.h"
26752 +#include "qi.h"
26753 +#include "desc.h"
26754 +#include "intern.h"
26755 +#include "desc_constr.h"
26756 +
26757 +#define PREHDR_RSLS_SHIFT      31
26758 +
26759 +/*
26760 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
26761 + * so that resources used by the in-flight buffers do not become a memory hog.
26762 + */
26763 +#define MAX_RSP_FQ_BACKLOG_PER_CPU     256
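/*
 * Editor's note, worked example: with four portal-owning CPUs,
 * init_cgr() below programs the congestion-entry threshold to
 * 4 * MAX_RSP_FQ_BACKLOG_PER_CPU = 1024 in-flight frames.
 */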
26764 +
26765 +#define CAAM_QI_ENQUEUE_RETRIES        10000
26766 +
26767 +#define CAAM_NAPI_WEIGHT       63
26768 +
26769 +/*
26770 + * caam_napi - struct holding CAAM NAPI-related params
26771 + * @irqtask: IRQ task for QI backend
26772 + * @p: QMan portal
26773 + */
26774 +struct caam_napi {
26775 +       struct napi_struct irqtask;
26776 +       struct qman_portal *p;
26777 +};
26778 +
26779 +/*
26780 + * caam_qi_pcpu_priv - percpu private data structure maintaining the list of
26781 + *                     pending responses expected on each cpu.
26782 + * @caam_napi: CAAM NAPI params
26783 + * @net_dev: netdev used by NAPI
26784 + * @rsp_fq: response FQ from CAAM
26785 + */
26786 +struct caam_qi_pcpu_priv {
26787 +       struct caam_napi caam_napi;
26788 +       struct net_device net_dev;
26789 +       struct qman_fq *rsp_fq;
26790 +} ____cacheline_aligned;
26791 +
26792 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
26793 +static DEFINE_PER_CPU(int, last_cpu);
26794 +
26795 +/*
26796 + * caam_qi_priv - CAAM QI backend private params
26797 + * @cgr: QMan congestion group
26798 + * @qi_pdev: platform device for QI backend
26799 + */
26800 +struct caam_qi_priv {
26801 +       struct qman_cgr cgr;
26802 +       struct platform_device *qi_pdev;
26803 +};
26804 +
26805 +static struct caam_qi_priv qipriv ____cacheline_aligned;
26806 +
26807 +/*
26808 + * This is written by only one core - the one that initialized the CGR - and
26809 + * read by multiple cores (all the others).
26810 + */
26811 +bool caam_congested __read_mostly;
26812 +EXPORT_SYMBOL(caam_congested);
26813 +
26814 +#ifdef CONFIG_DEBUG_FS
26815 +/*
26816 + * This is a counter for the number of times the congestion group (where all
26817 + * the request and response queues are) reached congestion. Incremented
26818 + * each time the congestion callback is called with congested == true.
26819 + */
26820 +static u64 times_congested;
26821 +#endif
26822 +
26823 +/*
26824 + * CPU on which the module was initialised. This is required because the
26825 + * QMan driver requires CGRs to be removed on the same CPU on which they
26826 + * were originally allocated.
26827 + */
26828 +static int mod_init_cpu;
26829 +
26830 +/*
26831 + * This is a cache of buffers, from which the users of the CAAM QI driver
26832 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
26833 + * doing malloc on the hotpath.
26834 + * NOTE: A more elegant solution would be to have some headroom in the frames
26835 + *       being processed. This could be added by the dpaa-ethernet driver.
26836 + *       This would pose a problem for userspace application processing which
26837 + *       cannot know of this limitation. So for now, this will work.
26838 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
26839 + */
26840 +static struct kmem_cache *qi_cache;
26841 +
26842 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
26843 +{
26844 +       struct qm_fd fd;
26845 +       int ret;
26846 +       int num_retries = 0;
26847 +
26848 +       fd.cmd = 0;
26849 +       fd.format = qm_fd_compound;
26850 +       fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
26851 +       fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
26852 +                             DMA_BIDIRECTIONAL);
26853 +       if (dma_mapping_error(qidev, fd.addr)) {
26854 +               dev_err(qidev, "DMA mapping error for QI enqueue request\n");
26855 +               return -EIO;
26856 +       }
26857 +
26858 +       do {
26859 +               ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
26860 +               if (likely(!ret))
26861 +                       return 0;
26862 +
26863 +               if (ret != -EBUSY)
26864 +                       break;
26865 +               num_retries++;
26866 +       } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
26867 +
26868 +       dev_err(qidev, "qman_enqueue failed: %d\n", ret);
26869 +
26870 +       return ret;
26871 +}
26872 +EXPORT_SYMBOL(caam_qi_enqueue);
26873 +
26874 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
26875 +                          const struct qm_mr_entry *msg)
26876 +{
26877 +       const struct qm_fd *fd;
26878 +       struct caam_drv_req *drv_req;
26879 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
26880 +
26881 +       fd = &msg->ern.fd;
26882 +
26883 +       if (fd->format != qm_fd_compound) {
26884 +               dev_err(qidev, "Non-compound FD from CAAM\n");
26885 +               return;
26886 +       }
26887 +
26888 +       drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
26889 +       if (!drv_req) {
26890 +               dev_err(qidev,
26891 +                       "Can't find original request for CAAM response\n");
26892 +               return;
26893 +       }
26894 +
26895 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
26896 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
26897 +
26898 +       drv_req->cbk(drv_req, -EIO);
26899 +}
26900 +
26901 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
26902 +                                         struct qman_fq *rsp_fq,
26903 +                                         dma_addr_t hwdesc,
26904 +                                         int fq_sched_flag)
26905 +{
26906 +       int ret;
26907 +       struct qman_fq *req_fq;
26908 +       struct qm_mcc_initfq opts;
26909 +
26910 +       req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
26911 +       if (!req_fq)
26912 +               return ERR_PTR(-ENOMEM);
26913 +
26914 +       req_fq->cb.ern = caam_fq_ern_cb;
26915 +       req_fq->cb.fqs = NULL;
26916 +
26917 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
26918 +                               QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
26919 +                            req_fq);
26920 +       if (ret) {
26921 +               dev_err(qidev, "Failed to create session req FQ\n");
26922 +               goto create_req_fq_fail;
26923 +       }
26924 +
26925 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
26926 +                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
26927 +                      QM_INITFQ_WE_CGID;
26928 +       opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
26929 +       opts.fqd.dest.channel = qm_channel_caam;
26930 +       opts.fqd.dest.wq = 2;
26931 +       opts.fqd.context_b = qman_fq_fqid(rsp_fq);
26932 +       opts.fqd.context_a.hi = upper_32_bits(hwdesc);
26933 +       opts.fqd.context_a.lo = lower_32_bits(hwdesc);
26934 +       opts.fqd.cgid = qipriv.cgr.cgrid;
26935 +
26936 +       ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
26937 +       if (ret) {
26938 +               dev_err(qidev, "Failed to init session req FQ\n");
26939 +               goto init_req_fq_fail;
26940 +       }
26941 +
26942 +       dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
26943 +               smp_processor_id());
26944 +       return req_fq;
26945 +
26946 +init_req_fq_fail:
26947 +       qman_destroy_fq(req_fq, 0);
26948 +create_req_fq_fail:
26949 +       kfree(req_fq);
26950 +       return ERR_PTR(ret);
26951 +}
26952 +
26953 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
26954 +{
26955 +       int ret;
26956 +
26957 +       ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
26958 +                                   QMAN_VOLATILE_FLAG_FINISH,
26959 +                                   QM_VDQCR_PRECEDENCE_VDQCR |
26960 +                                   QM_VDQCR_NUMFRAMES_TILLEMPTY);
26961 +       if (ret) {
26962 +               dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
26963 +               return ret;
26964 +       }
26965 +
26966 +       do {
26967 +               struct qman_portal *p;
26968 +
26969 +               p = qman_get_affine_portal(smp_processor_id());
26970 +               qman_p_poll_dqrr(p, 16);
26971 +       } while (fq->flags & QMAN_FQ_STATE_NE);
26972 +
26973 +       return 0;
26974 +}
26975 +
26976 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
26977 +{
26978 +       u32 flags;
26979 +       int ret;
26980 +
26981 +       ret = qman_retire_fq(fq, &flags);
26982 +       if (ret < 0) {
26983 +               dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
26984 +               return ret;
26985 +       }
26986 +
26987 +       if (!ret)
26988 +               goto empty_fq;
26989 +
26990 +       /* Async FQ retirement condition */
26991 +       if (ret == 1) {
26992 +               /* Retry till FQ gets in retired state */
26993 +               do {
26994 +                       msleep(20);
26995 +               } while (fq->state != qman_fq_state_retired);
26996 +
26997 +               WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
26998 +               WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
26999 +       }
27000 +
27001 +empty_fq:
27002 +       if (fq->flags & QMAN_FQ_STATE_NE) {
27003 +               ret = empty_retired_fq(qidev, fq);
27004 +               if (ret) {
27005 +                       dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
27006 +                               fq->fqid);
27007 +                       return ret;
27008 +               }
27009 +       }
27010 +
27011 +       ret = qman_oos_fq(fq);
27012 +       if (ret)
27013 +               dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
27014 +
27015 +       qman_destroy_fq(fq, 0);
27016 +       kfree(fq);
27017 +
27018 +       return ret;
27019 +}
27020 +
27021 +static int empty_caam_fq(struct qman_fq *fq)
27022 +{
27023 +       int ret;
27024 +       struct qm_mcr_queryfq_np np;
27025 +
27026 +       /* Wait till the older CAAM FQ gets empty */
27027 +       do {
27028 +               ret = qman_query_fq_np(fq, &np);
27029 +               if (ret)
27030 +                       return ret;
27031 +
27032 +               if (!np.frm_cnt)
27033 +                       break;
27034 +
27035 +               msleep(20);
27036 +       } while (1);
27037 +
27038 +       /*
27039 +        * Give extra time for pending jobs from this FQ in holding tanks
27040 +        * to get processed
27041 +        */
27042 +       msleep(20);
27043 +       return 0;
27044 +}
27045 +
27046 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
27047 +{
27048 +       int ret;
27049 +       u32 num_words;
27050 +       struct qman_fq *new_fq, *old_fq;
27051 +       struct device *qidev = drv_ctx->qidev;
27052 +
27053 +       num_words = desc_len(sh_desc);
27054 +       if (num_words > MAX_SDLEN) {
27055 +               dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
27056 +               return -EINVAL;
27057 +       }
27058 +
27059 +       /* Note down older req FQ */
27060 +       old_fq = drv_ctx->req_fq;
27061 +
27062 +       /* Create a new req FQ in parked state */
27063 +       new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
27064 +                                   drv_ctx->context_a, 0);
27065 +       if (unlikely(IS_ERR_OR_NULL(new_fq))) {
27066 +               dev_err(qidev, "FQ allocation for shdesc update failed\n");
27067 +               return PTR_ERR(new_fq);
27068 +       }
27069 +
27070 +       /* Hook up new FQ to context so that new requests keep queuing */
27071 +       drv_ctx->req_fq = new_fq;
27072 +
27073 +       /* Empty and remove the older FQ */
27074 +       ret = empty_caam_fq(old_fq);
27075 +       if (ret) {
27076 +               dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
27077 +
27078 +               /* We can revert to older FQ */
27079 +               drv_ctx->req_fq = old_fq;
27080 +
27081 +               if (kill_fq(qidev, new_fq))
27082 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
27083 +
27084 +               return ret;
27085 +       }
27086 +
27087 +       /*
27088 +        * Re-initialise pre-header. Set RSLS and SDLEN.
27089 +        * Update the shared descriptor for driver context.
27090 +        */
27091 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27092 +                                          num_words);
27093 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27094 +       dma_sync_single_for_device(qidev, drv_ctx->context_a,
27095 +                                  sizeof(drv_ctx->sh_desc) +
27096 +                                  sizeof(drv_ctx->prehdr),
27097 +                                  DMA_BIDIRECTIONAL);
27098 +
27099 +       /* Put the new FQ in scheduled state */
27100 +       ret = qman_schedule_fq(new_fq);
27101 +       if (ret) {
27102 +               dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
27103 +
27104 +               /*
27105 +                * We can kill new FQ and revert to old FQ.
27106 +                * Since the descriptor is already updated, treat this as success.
27107 +                */
27108 +
27109 +               drv_ctx->req_fq = old_fq;
27110 +
27111 +               if (kill_fq(qidev, new_fq))
27112 +                       dev_warn(qidev, "New CAAM FQ kill failed\n");
27113 +       } else if (kill_fq(qidev, old_fq)) {
27114 +               dev_warn(qidev, "Old CAAM FQ kill failed\n");
27115 +       }
27116 +
27117 +       return 0;
27118 +}
27119 +EXPORT_SYMBOL(caam_drv_ctx_update);
27120 +
27121 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
27122 +                                      int *cpu,
27123 +                                      u32 *sh_desc)
27124 +{
27125 +       size_t size;
27126 +       u32 num_words;
27127 +       dma_addr_t hwdesc;
27128 +       struct caam_drv_ctx *drv_ctx;
27129 +       const cpumask_t *cpus = qman_affine_cpus();
27130 +
27131 +       num_words = desc_len(sh_desc);
27132 +       if (num_words > MAX_SDLEN) {
27133 +               dev_err(qidev, "Invalid descriptor len: %d words\n",
27134 +                       num_words);
27135 +               return ERR_PTR(-EINVAL);
27136 +       }
27137 +
27138 +       drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
27139 +       if (!drv_ctx)
27140 +               return ERR_PTR(-ENOMEM);
27141 +
27142 +       /*
27143 +        * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
27144 +        * and dma-map them.
27145 +        */
27146 +       drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27147 +                                          num_words);
27148 +       memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27149 +       size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
27150 +       hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
27151 +                               DMA_BIDIRECTIONAL);
27152 +       if (dma_mapping_error(qidev, hwdesc)) {
27153 +               dev_err(qidev, "DMA map error for preheader + shdesc\n");
27154 +               kfree(drv_ctx);
27155 +               return ERR_PTR(-ENOMEM);
27156 +       }
27157 +       drv_ctx->context_a = hwdesc;
27158 +
27159 +       /* If given CPU does not own the portal, choose another one that does */
27160 +       if (!cpumask_test_cpu(*cpu, cpus)) {
27161 +               int *pcpu = &get_cpu_var(last_cpu);
27162 +
27163 +               *pcpu = cpumask_next(*pcpu, cpus);
27164 +               if (*pcpu >= nr_cpu_ids)
27165 +                       *pcpu = cpumask_first(cpus);
27166 +               *cpu = *pcpu;
27167 +
27168 +               put_cpu_var(last_cpu);
27169 +       }
27170 +       drv_ctx->cpu = *cpu;
27171 +
27172 +       /* Find response FQ hooked with this CPU */
27173 +       drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
27174 +
27175 +       /* Attach request FQ */
27176 +       drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
27177 +                                            QMAN_INITFQ_FLAG_SCHED);
27178 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
27179 +               dev_err(qidev, "create_caam_req_fq failed\n");
27180 +               dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
27181 +               kfree(drv_ctx);
27182 +               return ERR_PTR(-ENOMEM);
27183 +       }
27184 +
27185 +       drv_ctx->qidev = qidev;
27186 +       return drv_ctx;
27187 +}
27188 +EXPORT_SYMBOL(caam_drv_ctx_init);
27189 +
27190 +void *qi_cache_alloc(gfp_t flags)
27191 +{
27192 +       return kmem_cache_alloc(qi_cache, flags);
27193 +}
27194 +EXPORT_SYMBOL(qi_cache_alloc);
27195 +
27196 +void qi_cache_free(void *obj)
27197 +{
27198 +       kmem_cache_free(qi_cache, obj);
27199 +}
27200 +EXPORT_SYMBOL(qi_cache_free);
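/*
 * Usage sketch (editor's note, hypothetical caller): hot-path users
 * allocate their short extended descriptors from the cache instead of
 * calling kmalloc():
 *
 *	edesc = qi_cache_alloc(GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */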
27201 +
27202 +static int caam_qi_poll(struct napi_struct *napi, int budget)
27203 +{
27204 +       struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
27205 +
27206 +       int cleaned = qman_p_poll_dqrr(np->p, budget);
27207 +
27208 +       if (cleaned < budget) {
27209 +               napi_complete(napi);
27210 +               qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
27211 +       }
27212 +
27213 +       return cleaned;
27214 +}
27215 +
27216 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
27217 +{
27218 +       if (IS_ERR_OR_NULL(drv_ctx))
27219 +               return;
27220 +
27221 +       /* Remove request FQ */
27222 +       if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
27223 +               dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
27224 +
27225 +       dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
27226 +                        sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
27227 +                        DMA_BIDIRECTIONAL);
27228 +       kfree(drv_ctx);
27229 +}
27230 +EXPORT_SYMBOL(caam_drv_ctx_rel);
27231 +
27232 +int caam_qi_shutdown(struct device *qidev)
27233 +{
27234 +       int i, ret;
27235 +       struct caam_qi_priv *priv = dev_get_drvdata(qidev);
27236 +       const cpumask_t *cpus = qman_affine_cpus();
27237 +       struct cpumask old_cpumask = current->cpus_allowed;
27238 +
27239 +       for_each_cpu(i, cpus) {
27240 +               struct napi_struct *irqtask;
27241 +
27242 +               irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
27243 +               napi_disable(irqtask);
27244 +               netif_napi_del(irqtask);
27245 +
27246 +               if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
27247 +                       dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
27248 +       }
27249 +
27250 +       /*
27251 +        * The QMan driver requires CGRs to be deleted on the same CPU on which
27252 +        * they were instantiated. Hence we run the module removal on the same
27253 +        * CPU on which it was originally inserted.
27254 +        */
27255 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27256 +
27257 +       ret = qman_delete_cgr(&priv->cgr);
27258 +       if (ret)
27259 +               dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
27260 +       else
27261 +               qman_release_cgrid(priv->cgr.cgrid);
27262 +
27263 +       kmem_cache_destroy(qi_cache);
27264 +
27265 +       /* Now that we're done with the CGRs, restore the cpus allowed mask */
27266 +       set_cpus_allowed_ptr(current, &old_cpumask);
27267 +
27268 +       platform_device_unregister(priv->qi_pdev);
27269 +       return ret;
27270 +}
27271 +
27272 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
27273 +{
27274 +       caam_congested = congested;
27275 +
27276 +       if (congested) {
27277 +#ifdef CONFIG_DEBUG_FS
27278 +               times_congested++;
27279 +#endif
27280 +               pr_debug_ratelimited("CAAM entered congestion\n");
27281 +
27282 +       } else {
27283 +               pr_debug_ratelimited("CAAM exited congestion\n");
27284 +       }
27285 +}
27286 +
27287 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
27288 +{
27289 +       /*
27290 +        * In case of threaded ISR, for RT kernels in_irq() does not return
27291 +        * appropriate value, so use in_serving_softirq to distinguish between
27292 +        * softirq and irq contexts.
27293 +        */
27294 +       if (unlikely(in_irq() || !in_serving_softirq())) {
27295 +               /* Disable QMan IRQ source and invoke NAPI */
27296 +               qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
27297 +               np->p = p;
27298 +               napi_schedule(&np->irqtask);
27299 +               return 1;
27300 +       }
27301 +       return 0;
27302 +}
27303 +
27304 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
27305 +                                                   struct qman_fq *rsp_fq,
27306 +                                                   const struct qm_dqrr_entry *dqrr)
27307 +{
27308 +       struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
27309 +       struct caam_drv_req *drv_req;
27310 +       const struct qm_fd *fd;
27311 +       struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
27312 +
27313 +       if (caam_qi_napi_schedule(p, caam_napi))
27314 +               return qman_cb_dqrr_stop;
27315 +
27316 +       fd = &dqrr->fd;
27317 +       if (unlikely(fd->status))
27318 +               dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
27319 +
27320 +       if (unlikely(fd->format != qm_fd_compound)) {
27321 +               dev_err(qidev, "Non-compound FD from CAAM\n");
27322 +               return qman_cb_dqrr_consume;
27323 +       }
27324 +
27325 +       drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
27326 +       if (unlikely(!drv_req)) {
27327 +               dev_err(qidev,
27328 +                       "Can't find original request for caam response\n");
27329 +               return qman_cb_dqrr_consume;
27330 +       }
27331 +
27332 +       dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
27333 +                        sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
27334 +
27335 +       drv_req->cbk(drv_req, fd->status);
27336 +       return qman_cb_dqrr_consume;
27337 +}
27338 +
27339 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
27340 +{
27341 +       struct qm_mcc_initfq opts;
27342 +       struct qman_fq *fq;
27343 +       int ret;
27344 +
27345 +       fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
27346 +       if (!fq)
27347 +               return -ENOMEM;
27348 +
27349 +       fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
27350 +
27351 +       ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
27352 +                            QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
27353 +       if (ret) {
27354 +               dev_err(qidev, "Rsp FQ create failed\n");
27355 +               kfree(fq);
27356 +               return -ENODEV;
27357 +       }
27358 +
27359 +       opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
27360 +               QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
27361 +               QM_INITFQ_WE_CGID;
27362 +       opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
27363 +                          QM_FQCTRL_CGE;
27364 +       opts.fqd.dest.channel = qman_affine_channel(cpu);
27365 +       opts.fqd.dest.wq = 3;
27366 +       opts.fqd.cgid = qipriv.cgr.cgrid;
27367 +       opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
27368 +                                               QM_STASHING_EXCL_DATA;
27369 +       opts.fqd.context_a.stashing.data_cl = 1;
27370 +       opts.fqd.context_a.stashing.context_cl = 1;
27371 +
27372 +       ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
27373 +       if (ret) {
27374 +               dev_err(qidev, "Rsp FQ init failed\n");
27375 +               kfree(fq);
27376 +               return -ENODEV;
27377 +       }
27378 +
27379 +       per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
27380 +
27381 +       dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
27382 +       return 0;
27383 +}
27384 +
27385 +static int init_cgr(struct device *qidev)
27386 +{
27387 +       int ret;
27388 +       struct qm_mcc_initcgr opts;
27389 +       const u64 cpus = *(u64 *)qman_affine_cpus();
27390 +       const int num_cpus = hweight64(cpus);
27391 +       const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
27392 +
27393 +       ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
27394 +       if (ret) {
27395 +               dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
27396 +               return ret;
27397 +       }
27398 +
27399 +       qipriv.cgr.cb = cgr_cb;
27400 +       memset(&opts, 0, sizeof(opts));
27401 +       opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
27402 +       opts.cgr.cscn_en = QM_CGR_EN;
27403 +       opts.cgr.mode = QMAN_CGR_MODE_FRAME;
27404 +       qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
27405 +
27406 +       ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
27407 +       if (ret) {
27408 +               dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
27409 +                       qipriv.cgr.cgrid);
27410 +               return ret;
27411 +       }
27412 +
27413 +       dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
27414 +       return 0;
27415 +}
27416 +
27417 +static int alloc_rsp_fqs(struct device *qidev)
27418 +{
27419 +       int ret, i;
27420 +       const cpumask_t *cpus = qman_affine_cpus();
27421 +
27422 +       /* Now create response FQs */
27423 +       for_each_cpu(i, cpus) {
27424 +               ret = alloc_rsp_fq_cpu(qidev, i);
27425 +               if (ret) {
27426 +                       dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
27427 +                       return ret;
27428 +               }
27429 +       }
27430 +
27431 +       return 0;
27432 +}
27433 +
27434 +static void free_rsp_fqs(void)
27435 +{
27436 +       int i;
27437 +       const cpumask_t *cpus = qman_affine_cpus();
27438 +
27439 +       for_each_cpu(i, cpus)
27440 +               kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
27441 +}
27442 +
27443 +int caam_qi_init(struct platform_device *caam_pdev)
27444 +{
27445 +       int err, i;
27446 +       struct platform_device *qi_pdev;
27447 +       struct device *ctrldev = &caam_pdev->dev, *qidev;
27448 +       struct caam_drv_private *ctrlpriv;
27449 +       const cpumask_t *cpus = qman_affine_cpus();
27450 +       struct cpumask old_cpumask = current->cpus_allowed;
27451 +       static struct platform_device_info qi_pdev_info = {
27452 +               .name = "caam_qi",
27453 +               .id = PLATFORM_DEVID_NONE
27454 +       };
27455 +
27456 +       /*
27457 +        * QMAN requires CGRs to be removed from the same CPU and portal on
27458 +        * which they were originally allocated. Hence we need to note down
27459 +        * the initialisation CPU and use the same CPU for module exit.
27460 +        * We select the first CPU from the list of portal-owning CPUs and
27461 +        * pin module init to this CPU.
27462 +        */
27463 +       mod_init_cpu = cpumask_first(cpus);
27464 +       set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27465 +
27466 +       qi_pdev_info.parent = ctrldev;
27467 +       qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
27468 +       qi_pdev = platform_device_register_full(&qi_pdev_info);
27469 +       if (IS_ERR(qi_pdev))
27470 +               return PTR_ERR(qi_pdev);
27471 +       arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
27472 +
27473 +       ctrlpriv = dev_get_drvdata(ctrldev);
27474 +       qidev = &qi_pdev->dev;
27475 +
27476 +       qipriv.qi_pdev = qi_pdev;
27477 +       dev_set_drvdata(qidev, &qipriv);
27478 +
27479 +       /* Initialize the congestion detection */
27480 +       err = init_cgr(qidev);
27481 +       if (err) {
27482 +               dev_err(qidev, "CGR initialization failed: %d\n", err);
27483 +               platform_device_unregister(qi_pdev);
27484 +               return err;
27485 +       }
27486 +
27487 +       /* Initialise response FQs */
27488 +       err = alloc_rsp_fqs(qidev);
27489 +       if (err) {
27490 +               dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
27491 +               free_rsp_fqs();
27492 +               platform_device_unregister(qi_pdev);
27493 +               return err;
27494 +       }
27495 +
27496 +       /*
27497 +        * Enable the NAPI contexts on each core that has an affine
27498 +        * portal.
27499 +        */
27500 +       for_each_cpu(i, cpus) {
27501 +               struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
27502 +               struct caam_napi *caam_napi = &priv->caam_napi;
27503 +               struct napi_struct *irqtask = &caam_napi->irqtask;
27504 +               struct net_device *net_dev = &priv->net_dev;
27505 +
27506 +               net_dev->dev = *qidev;
27507 +               INIT_LIST_HEAD(&net_dev->napi_list);
27508 +
27509 +               netif_napi_add(net_dev, irqtask, caam_qi_poll,
27510 +                              CAAM_NAPI_WEIGHT);
27511 +
27512 +               napi_enable(irqtask);
27513 +       }
27514 +
27515 +       /* Hook up QI device to parent controlling caam device */
27516 +       ctrlpriv->qidev = qidev;
27517 +
27518 +       qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
27519 +                                    SLAB_CACHE_DMA, NULL);
27520 +       if (!qi_cache) {
27521 +               dev_err(qidev, "Can't allocate CAAM cache\n");
27522 +               free_rsp_fqs();
27523 +               platform_device_unregister(qi_pdev);
27524 +               return -ENOMEM;
27525 +       }
27526 +
27527 +       /* Done with the CGRs; restore the cpus allowed mask */
27528 +       set_cpus_allowed_ptr(current, &old_cpumask);
27529 +#ifdef CONFIG_DEBUG_FS
27530 +       debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
27531 +                           &times_congested, &caam_fops_u64_ro);
27532 +#endif
27533 +       dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
27534 +       return 0;
27535 +}
27536 --- /dev/null
27537 +++ b/drivers/crypto/caam/qi.h
27538 @@ -0,0 +1,204 @@
27539 +/*
27540 + * Public definitions for the CAAM/QI (Queue Interface) backend.
27541 + *
27542 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27543 + * Copyright 2016-2017 NXP
27544 + */
27545 +
27546 +#ifndef __QI_H__
27547 +#define __QI_H__
27548 +
27549 +#include <linux/fsl_qman.h>
27550 +#include "compat.h"
27551 +#include "desc.h"
27552 +#include "desc_constr.h"
27553 +
27554 +/*
27555 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
27556 + * (as pointed to by context_a of the to-CAAM FQ).
27557 + * When the job descriptor is executed by DECO, the whole job descriptor
27558 + * together with shared descriptor gets loaded in DECO buffer, which is
27559 + * 64 words (each 32-bit) long.
27560 + *
27561 + * The job descriptor constructed by CAAM hardware has the following layout:
27562 + *
27563 + *     HEADER          (1 word)
27564 + *     Shdesc ptr      (1 or 2 words)
27565 + *     SEQ_OUT_PTR     (1 word)
27566 + *     Out ptr         (1 or 2 words)
27567 + *     Out length      (1 word)
27568 + *     SEQ_IN_PTR      (1 word)
27569 + *     In ptr          (1 or 2 words)
27570 + *     In length       (1 word)
27571 + *
27572 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
27573 + *
27574 + * Apart from the shdesc contents, the total number of words that get loaded
27575 + * into the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
27576 + * be used for storing the shared descriptor.
27577 + */
27578 +#define MAX_SDLEN      ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
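
As a quick sanity check on the arithmetic above, a worked example, assuming the
usual CAAM definitions from desc.h/desc_constr.h (CAAM_CMD_SZ = 4 bytes,
CAAM_DESC_BYTES_MAX = 64 * CAAM_CMD_SZ, DESC_JOB_IO_LEN = CAAM_CMD_SZ * 5 +
CAAM_PTR_SZ * 3):

/*
 * Worked example (assumes 64-bit DMA addresses, i.e. CAAM_PTR_SZ == 8):
 *   CAAM_DESC_BYTES_MAX = 64 words * 4 B = 256 B   (DECO buffer size)
 *   DESC_JOB_IO_LEN     = 5 * 4 + 3 * 8  =  44 B   (the 11 words: HEADER,
 *                         SEQ_OUT_PTR, out length, SEQ_IN_PTR, in length,
 *                         plus three 2-word pointers)
 *   MAX_SDLEN           = (256 - 44) / 4 =  53 words left for the shared
 *                         descriptor (64 - 11, matching the text above)
 */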
27579 +
27580 +/* Length of a single buffer in the QI driver memory cache */
27581 +#define CAAM_QI_MEMCACHE_SIZE  768
27582 +
27583 +extern bool caam_congested __read_mostly;
27584 +
27585 +/*
27586 + * This is the request structure the driver application should fill while
27587 + * submitting a job to the driver.
27588 + */
27589 +struct caam_drv_req;
27590 +
27591 +/*
27592 + * caam_qi_cbk - application's callback function invoked by the driver when
27593 + *               the request has been processed.
27594 + * @drv_req: original request that was submitted
27595 + * @status: completion status of request (0 - success, non-zero - error code)
27596 + */
27597 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
27598 +
27599 +enum optype {
27600 +       ENCRYPT,
27601 +       DECRYPT,
27602 +       GIVENCRYPT,
27603 +       NUM_OP
27604 +};
27605 +
27606 +/**
27607 + * caam_drv_ctx - CAAM/QI backend driver context
27608 + *
27609 + * Jobs are processed by the driver against a driver context.
27610 + * A driver context is attached to every cryptographic context.
27611 + * The driver context holds data for private use by the driver;
27612 + * to applications it is an opaque structure.
27613 + *
27614 + * @prehdr: preheader placed before the shared descriptor
27615 + * @sh_desc: shared descriptor
27616 + * @context_a: shared descriptor dma address
27617 + * @req_fq: to-CAAM request frame queue
27618 + * @rsp_fq: from-CAAM response frame queue
27619 + * @cpu: cpu on which to receive CAAM response
27620 + * @op_type: operation type
27621 + * @qidev: device pointer for CAAM/QI backend
27622 + */
27623 +struct caam_drv_ctx {
27624 +       u32 prehdr[2];
27625 +       u32 sh_desc[MAX_SDLEN];
27626 +       dma_addr_t context_a;
27627 +       struct qman_fq *req_fq;
27628 +       struct qman_fq *rsp_fq;
27629 +       int cpu;
27630 +       enum optype op_type;
27631 +       struct device *qidev;
27632 +} ____cacheline_aligned;
27633 +
27634 +/**
27635 + * caam_drv_req - The request structure the driver application should fill while
27636 + *                submitting a job to driver.
27637 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
27638 + *          buffers.
27639 + * @cbk: callback function to invoke when job is completed
27640 + * @app_ctx: arbitrary context attached with request by the application
27641 + *
27642 + * The field below is filled in when the request is submitted and is
27643 + * otherwise private to the driver.
27644 + *
27645 + * @drv_ctx: driver context against which the request is submitted (as
27646 + *           returned by caam_drv_ctx_init())
27647 + */
27648 +struct caam_drv_req {
27649 +       struct qm_sg_entry fd_sgt[2];
27650 +       struct caam_drv_ctx *drv_ctx;
27651 +       caam_qi_cbk cbk;
27652 +       void *app_ctx;
27653 +} ____cacheline_aligned;
27654 +
27655 +/**
27656 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
27657 + *
27658 + * A CAAM/QI driver context must be attached to each cryptographic context.
27659 + * This function allocates memory for CAAM/QI context and returns a handle to
27660 + * the application. This handle must be submitted along with each enqueue
27661 + * request to the driver by the application.
27662 + *
27663 + * @cpu: CPU on which the application prefers the driver to receive CAAM
27664 + *       responses; the request completion callback is issued from this
27665 + *       CPU.
27666 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
27667 + *           context.
27668 + *
27669 + * Returns a driver context on success or negative error code on failure.
27670 + */
27671 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
27672 +                                      u32 *sh_desc);
27673 +
27674 +/**
27675 + * caam_qi_enqueue - Submit a request to QI backend driver.
27676 + *
27677 + * The request structure must be properly filled as described above.
27678 + *
27679 + * @qidev: device pointer for QI backend
27680 + * @req: CAAM QI request structure
27681 + *
27682 + * Returns 0 on success or negative error code on failure.
27683 + */
27684 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
27685 +
27686 +/**
27687 + * caam_drv_ctx_busy - Check whether there are too many jobs pending with
27688 + *                    CAAM or too many CAAM responses pending processing.
27689 + * @drv_ctx: driver context for which the job is to be submitted
27690 + *
27691 + * Returns the CAAM congestion status (true if congested, false otherwise).
27692 + */
27693 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
27694 +
27695 +/**
27696 + * caam_drv_ctx_update - Update QI driver context
27697 + *
27698 + * Invoked when the shared descriptor in the driver context needs to be changed.
27699 + *
27700 + * @drv_ctx: driver context to be updated
27701 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
27702 + *
27703 + * Returns 0 on success or negative error code on failure.
27704 + */
27705 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
27706 +
27707 +/**
27708 + * caam_drv_ctx_rel - Release a QI driver context
27709 + * @drv_ctx: context to be released
27710 + */
27711 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
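
Taken together, the functions above make up the whole per-context lifecycle.
The following is a minimal caller sketch, not part of the patch: the names
my_done() and my_submit() are hypothetical, error paths are abbreviated, and
the fd_sgt entries are assumed to be filled via the sg_sw_qm.h helpers.

/* Hypothetical caller sketch -- illustrates the API flow only. */
static void my_done(struct caam_drv_req *drv_req, u32 status)
{
	if (status)
		pr_err("CAAM job failed: 0x%x\n", status);
	complete(drv_req->app_ctx);	/* app_ctx was set by the submitter */
}

static int my_submit(struct device *qidev, u32 *sh_desc)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct caam_drv_ctx *drv_ctx;
	struct caam_drv_req req;
	int cpu = -1;			/* no CPU preference */
	int ret;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	/* req.fd_sgt[0]/[1] must point at DMA-mapped output/input buffers,
	 * e.g. filled in with dma_to_qm_sg_one() from sg_sw_qm.h. */
	req.drv_ctx = drv_ctx;
	req.cbk = my_done;
	req.app_ctx = &done;

	ret = caam_qi_enqueue(qidev, &req);
	if (!ret)
		wait_for_completion(&done);	/* my_done() runs on @cpu */

	caam_drv_ctx_rel(drv_ctx);
	return ret;
}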
27712 +
27713 +int caam_qi_init(struct platform_device *pdev);
27714 +int caam_qi_shutdown(struct device *dev);
27715 +
27716 +/**
27717 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
27718 + *
27719 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs memory allocated
27720 + * on the hotpath. Instead of using kmalloc, one can use the services of the
27721 + * CAAM QI memory cache (backed by kmem_cache). The buffers have a size of
27722 + * CAAM_QI_MEMCACHE_SIZE (768 bytes), sufficient for hosting 48 S/G entries.
27723 + *
27724 + * @flags: flags that would be used for the equivalent malloc(..) call
27725 + *
27726 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
27727 + */
27728 +void *qi_cache_alloc(gfp_t flags);
27729 +
27730 +/**
27731 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
27732 + *
27733 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs the
27734 + * buffer previously allocated by a qi_cache_alloc() call.
27735 + * No checking is done; the call is passed straight through to
27736 + * kmem_cache_free().
27737 + *
27738 + * @obj: object previously allocated using qi_cache_alloc()
27739 + */
27740 +void qi_cache_free(void *obj);
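
A short usage sketch for the cache helpers (hypothetical edesc pointer;
GFP_ATOMIC chosen because the allocation sits on the hotpath):

/* Hypothetical hotpath snippet -- not part of the patch. */
void *edesc = qi_cache_alloc(GFP_ATOMIC);
if (!edesc)
	return -ENOMEM;
/* ... build the S/G table / extended descriptor in the buffer ... */
qi_cache_free(edesc);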
27741 +
27742 +#endif /* __QI_H__ */
27743 --- a/drivers/crypto/caam/regs.h
27744 +++ b/drivers/crypto/caam/regs.h
27745 @@ -2,6 +2,7 @@
27746   * CAAM hardware register-level view
27747   *
27748   * Copyright 2008-2011 Freescale Semiconductor, Inc.
27749 + * Copyright 2017 NXP
27750   */
27751  
27752  #ifndef REGS_H
27753 @@ -67,6 +68,7 @@
27754   */
27755  
27756  extern bool caam_little_end;
27757 +extern bool caam_imx;
27758  
27759  #define caam_to_cpu(len)                               \
27760  static inline u##len caam##len ## _to_cpu(u##len val)  \
27761 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
27762  #else /* CONFIG_64BIT */
27763  static inline void wr_reg64(void __iomem *reg, u64 data)
27764  {
27765 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27766 -       if (caam_little_end) {
27767 +       if (!caam_imx && caam_little_end) {
27768                 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
27769                 wr_reg32((u32 __iomem *)(reg), data);
27770 -       } else
27771 -#endif
27772 -       {
27773 +       } else {
27774                 wr_reg32((u32 __iomem *)(reg), data >> 32);
27775                 wr_reg32((u32 __iomem *)(reg) + 1, data);
27776         }
27777 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
27778  
27779  static inline u64 rd_reg64(void __iomem *reg)
27780  {
27781 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27782 -       if (caam_little_end)
27783 +       if (!caam_imx && caam_little_end)
27784                 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
27785                         (u64)rd_reg32((u32 __iomem *)(reg)));
27786 -       else
27787 -#endif
27788 -               return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27789 -                       (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27790 +
27791 +       return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27792 +               (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27793  }
27794  #endif /* CONFIG_64BIT  */
27795  
27796 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
27797 +{
27798 +       if (caam_imx)
27799 +               return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
27800 +                        (u64)cpu_to_caam32(upper_32_bits(value)));
27801 +
27802 +       return cpu_to_caam64(value);
27803 +}
27804 +
27805 +static inline u64 caam_dma64_to_cpu(u64 value)
27806 +{
27807 +       if (caam_imx)
27808 +               return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
27809 +                        (u64)caam32_to_cpu(upper_32_bits(value)));
27810 +
27811 +       return caam64_to_cpu(value);
27812 +}
27813 +
27814  #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
27815 -#ifdef CONFIG_SOC_IMX7D
27816 -#define cpu_to_caam_dma(value) \
27817 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27818 -                 (u64)cpu_to_caam32(upper_32_bits(value)))
27819 -#define caam_dma_to_cpu(value) \
27820 -               (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
27821 -                 (u64)caam32_to_cpu(upper_32_bits(value)))
27822 -#else
27823 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
27824 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
27825 -#endif /* CONFIG_SOC_IMX7D */
27826 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
27827 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
27828  #else
27829  #define cpu_to_caam_dma(value) cpu_to_caam32(value)
27830  #define caam_dma_to_cpu(value) caam32_to_cpu(value)
27831 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT  */
27832 -
27833 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27834 -#define cpu_to_caam_dma64(value) \
27835 -               (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27836 -                (u64)cpu_to_caam32(upper_32_bits(value)))
27837 -#else
27838 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
27839 -#endif
27840 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
27841  
27842  /*
27843   * jr_outentry
27844 @@ -293,6 +291,7 @@ struct caam_perfmon {
27845         u32 cha_rev_ls;         /* CRNR - CHA Rev No. Least significant half*/
27846  #define CTPR_MS_QI_SHIFT       25
27847  #define CTPR_MS_QI_MASK                (0x1ull << CTPR_MS_QI_SHIFT)
27848 +#define CTPR_MS_DPAA2          BIT(13)
27849  #define CTPR_MS_VIRT_EN_INCL   0x00000001
27850  #define CTPR_MS_VIRT_EN_POR    0x00000002
27851  #define CTPR_MS_PG_SZ_MASK     0x10
27852 @@ -628,6 +627,8 @@ struct caam_job_ring {
27853  #define JRSTA_DECOERR_INVSIGN       0x86
27854  #define JRSTA_DECOERR_DSASIGN       0x87
27855  
27856 +#define JRSTA_QIERR_ERROR_MASK      0x00ff
27857 +
27858  #define JRSTA_CCBERR_JUMP           0x08000000
27859  #define JRSTA_CCBERR_INDEX_MASK     0xff00
27860  #define JRSTA_CCBERR_INDEX_SHIFT    8
27861 --- /dev/null
27862 +++ b/drivers/crypto/caam/sg_sw_qm.h
27863 @@ -0,0 +1,126 @@
27864 +/*
27865 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27866 + * Copyright 2016-2017 NXP
27867 + *
27868 + * Redistribution and use in source and binary forms, with or without
27869 + * modification, are permitted provided that the following conditions are met:
27870 + *     * Redistributions of source code must retain the above copyright
27871 + *       notice, this list of conditions and the following disclaimer.
27872 + *     * Redistributions in binary form must reproduce the above copyright
27873 + *       notice, this list of conditions and the following disclaimer in the
27874 + *       documentation and/or other materials provided with the distribution.
27875 + *     * Neither the name of Freescale Semiconductor nor the
27876 + *       names of its contributors may be used to endorse or promote products
27877 + *       derived from this software without specific prior written permission.
27878 + *
27879 + *
27880 + * ALTERNATIVELY, this software may be distributed under the terms of the
27881 + * GNU General Public License ("GPL") as published by the Free Software
27882 + * Foundation, either version 2 of that License or (at your option) any
27883 + * later version.
27884 + *
27885 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
27886 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27887 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27888 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27889 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27890 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27891 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27892 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27893 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27894 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27895 + */
27896 +
27897 +#ifndef __SG_SW_QM_H
27898 +#define __SG_SW_QM_H
27899 +
27900 +#include <linux/fsl_qman.h>
27901 +#include "regs.h"
27902 +
27903 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
27904 +{
27905 +       dma_addr_t addr = qm_sg_ptr->opaque;
27906 +
27907 +       qm_sg_ptr->opaque = cpu_to_caam64(addr);
27908 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
27909 +}
27910 +
27911 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
27912 +                                 u32 len, u16 offset)
27913 +{
27914 +       qm_sg_ptr->addr = dma;
27915 +       qm_sg_ptr->length = len;
27916 +       qm_sg_ptr->__reserved2 = 0;
27917 +       qm_sg_ptr->bpid = 0;
27918 +       qm_sg_ptr->__reserved3 = 0;
27919 +       qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
27920 +
27921 +       cpu_to_hw_sg(qm_sg_ptr);
27922 +}
27923 +
27924 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
27925 +                                   dma_addr_t dma, u32 len, u16 offset)
27926 +{
27927 +       qm_sg_ptr->extension = 0;
27928 +       qm_sg_ptr->final = 0;
27929 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27930 +}
27931 +
27932 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
27933 +                                        dma_addr_t dma, u32 len, u16 offset)
27934 +{
27935 +       qm_sg_ptr->extension = 0;
27936 +       qm_sg_ptr->final = 1;
27937 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27938 +}
27939 +
27940 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
27941 +                                       dma_addr_t dma, u32 len, u16 offset)
27942 +{
27943 +       qm_sg_ptr->extension = 1;
27944 +       qm_sg_ptr->final = 0;
27945 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27946 +}
27947 +
27948 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
27949 +                                            dma_addr_t dma, u32 len,
27950 +                                            u16 offset)
27951 +{
27952 +       qm_sg_ptr->extension = 1;
27953 +       qm_sg_ptr->final = 1;
27954 +       __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27955 +}
27956 +
27957 +/*
27958 + * Convert a scatterlist to the h/w link table format, but do not set the
27959 + * final bit; instead, return the last entry so the caller can set it.
27960 + */
27961 +static inline struct qm_sg_entry *
27962 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
27963 +           struct qm_sg_entry *qm_sg_ptr, u16 offset)
27964 +{
27965 +       while (sg_count && sg) {
27966 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
27967 +                                sg_dma_len(sg), offset);
27968 +               qm_sg_ptr++;
27969 +               sg = sg_next(sg);
27970 +               sg_count--;
27971 +       }
27972 +       return qm_sg_ptr - 1;
27973 +}
27974 +
27975 +/*
27976 + * Convert a scatterlist to the h/w link table format;
27977 + * the scatterlist must have been previously DMA mapped.
27978 + */
27979 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
27980 +                                   struct qm_sg_entry *qm_sg_ptr, u16 offset)
27981 +{
27982 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
27983 +
27984 +       qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
27985 +       qm_sg_ptr->final = 1;
27986 +       qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
27987 +}
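
A hedged example of how these helpers are typically combined (hypothetical
dev/sg/nents/qm_sg_table names; assumes the scatterlist was mapped first):

/* Hypothetical snippet -- not part of the patch. */
int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

if (count)
	/* write all entries and set the final bit on the last one */
	sg_to_qm_sg_last(sg, count, qm_sg_table, 0);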
27988 +
27989 +#endif /* __SG_SW_QM_H */
27990 --- /dev/null
27991 +++ b/drivers/crypto/caam/sg_sw_qm2.h
27992 @@ -0,0 +1,81 @@
27993 +/*
27994 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
27995 + * Copyright 2017 NXP
27996 + *
27997 + * Redistribution and use in source and binary forms, with or without
27998 + * modification, are permitted provided that the following conditions are met:
27999 + *     * Redistributions of source code must retain the above copyright
28000 + *      notice, this list of conditions and the following disclaimer.
28001 + *     * Redistributions in binary form must reproduce the above copyright
28002 + *      notice, this list of conditions and the following disclaimer in the
28003 + *      documentation and/or other materials provided with the distribution.
28004 + *     * Neither the names of the above-listed copyright holders nor the
28005 + *      names of any contributors may be used to endorse or promote products
28006 + *      derived from this software without specific prior written permission.
28007 + *
28008 + *
28009 + * ALTERNATIVELY, this software may be distributed under the terms of the
28010 + * GNU General Public License ("GPL") as published by the Free Software
28011 + * Foundation, either version 2 of that License or (at your option) any
28012 + * later version.
28013 + *
28014 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28015 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28016 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28017 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
28018 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28019 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28020 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28021 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28022 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28023 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28024 + * POSSIBILITY OF SUCH DAMAGE.
28025 + */
28026 +
28027 +#ifndef _SG_SW_QM2_H_
28028 +#define _SG_SW_QM2_H_
28029 +
28030 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28031 +
28032 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
28033 +                                   dma_addr_t dma, u32 len, u16 offset)
28034 +{
28035 +       dpaa2_sg_set_addr(qm_sg_ptr, dma);
28036 +       dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
28037 +       dpaa2_sg_set_final(qm_sg_ptr, false);
28038 +       dpaa2_sg_set_len(qm_sg_ptr, len);
28039 +       dpaa2_sg_set_bpid(qm_sg_ptr, 0);
28040 +       dpaa2_sg_set_offset(qm_sg_ptr, offset);
28041 +}
28042 +
28043 +/*
28044 + * Convert a scatterlist to the h/w link table format, but do not set the
28045 + * final bit; instead, return the last entry so the caller can set it.
28046 + */
28047 +static inline struct dpaa2_sg_entry *
28048 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
28049 +           struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
28050 +{
28051 +       while (sg_count && sg) {
28052 +               dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
28053 +                                sg_dma_len(sg), offset);
28054 +               qm_sg_ptr++;
28055 +               sg = sg_next(sg);
28056 +               sg_count--;
28057 +       }
28058 +       return qm_sg_ptr - 1;
28059 +}
28060 +
28061 +/*
28062 + * Convert a scatterlist to the h/w link table format;
28063 + * the scatterlist must have been previously DMA mapped.
28064 + */
28065 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
28066 +                                   struct dpaa2_sg_entry *qm_sg_ptr,
28067 +                                   u16 offset)
28068 +{
28069 +       qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
28070 +       dpaa2_sg_set_final(qm_sg_ptr, true);
28071 +}
28072 +
28073 +#endif /* _SG_SW_QM2_H_ */
28074 --- a/drivers/crypto/caam/sg_sw_sec4.h
28075 +++ b/drivers/crypto/caam/sg_sw_sec4.h
28076 @@ -5,9 +5,19 @@
28077   *
28078   */
28079  
28080 +#ifndef _SG_SW_SEC4_H_
28081 +#define _SG_SW_SEC4_H_
28082 +
28083 +#include "ctrl.h"
28084  #include "regs.h"
28085 +#include "sg_sw_qm2.h"
28086 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28087  
28088 -struct sec4_sg_entry;
28089 +struct sec4_sg_entry {
28090 +       u64 ptr;
28091 +       u32 len;
28092 +       u32 bpid_offset;
28093 +};
28094  
28095  /*
28096   * convert single dma address to h/w link table format
28097 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
28098  static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
28099                                       dma_addr_t dma, u32 len, u16 offset)
28100  {
28101 -       sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28102 -       sec4_sg_ptr->len = cpu_to_caam32(len);
28103 -       sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
28104 +       if (caam_dpaa2) {
28105 +               dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
28106 +                                offset);
28107 +       } else {
28108 +               sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28109 +               sec4_sg_ptr->len = cpu_to_caam32(len);
28110 +               sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
28111 +                                                        SEC4_SG_OFFSET_MASK);
28112 +       }
28113  #ifdef DEBUG
28114         print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
28115                        DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
28116 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
28117         return sec4_sg_ptr - 1;
28118  }
28119  
28120 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
28121 +{
28122 +       if (caam_dpaa2)
28123 +               dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
28124 +       else
28125 +               sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28126 +}
28127 +
28128  /*
28129   * convert scatterlist to h/w link table format
28130   * scatterlist must have been previously dma mapped
28131 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
28132                                       u16 offset)
28133  {
28134         sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
28135 -       sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28136 -}
28137 -
28138 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
28139 -       struct scatterlist *sg, unsigned int total,
28140 -       struct sec4_sg_entry *sec4_sg_ptr)
28141 -{
28142 -       do {
28143 -               unsigned int len = min(sg_dma_len(sg), total);
28144 -
28145 -               dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
28146 -               sec4_sg_ptr++;
28147 -               sg = sg_next(sg);
28148 -               total -= len;
28149 -       } while (total);
28150 -       return sec4_sg_ptr - 1;
28151 +       sg_to_sec4_set_last(sec4_sg_ptr);
28152  }
28153  
28154 -/* derive number of elements in scatterlist, but return 0 for 1 */
28155 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
28156 -{
28157 -       int sg_nents = sg_nents_for_len(sg_list, nbytes);
28158 -
28159 -       if (likely(sg_nents == 1))
28160 -               return 0;
28161 -
28162 -       return sg_nents;
28163 -}
28164 +#endif /* _SG_SW_SEC4_H_ */
28165 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
28166 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
28167 @@ -516,7 +516,7 @@ err:
28168  
28169  /**
28170   * rsi_disconnect() - This function performs the reverse of the probe function,
28171 - *                   it deintialize the driver structure.
28172 + *                   it deinitializes the driver structure.
28173   * @pfunction: Pointer to the USB interface structure.
28174   *
28175   * Return: None.
28176 --- a/drivers/staging/wilc1000/linux_wlan.c
28177 +++ b/drivers/staging/wilc1000/linux_wlan.c
28178 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
28179         vif = netdev_priv(dev);
28180         wilc = vif->wilc;
28181  
28182 -       /* Deintialize IRQ */
28183 +       /* Deinitialize IRQ */
28184         if (wilc->dev_irq_num) {
28185                 free_irq(wilc->dev_irq_num, wilc);
28186                 gpio_free(wilc->gpio);
28187 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28188 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28189 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
28190                 del_timer_sync(&wilc_during_ip_timer);
28191  
28192         if (s32Error)
28193 -               netdev_err(net, "Error while deintializing host interface\n");
28194 +               netdev_err(net, "Error while deinitializing host interface\n");
28195  
28196         return s32Error;
28197  }
28198 --- /dev/null
28199 +++ b/include/crypto/acompress.h
28200 @@ -0,0 +1,269 @@
28201 +/*
28202 + * Asynchronous Compression operations
28203 + *
28204 + * Copyright (c) 2016, Intel Corporation
28205 + * Authors: Weigang Li <weigang.li@intel.com>
28206 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28207 + *
28208 + * This program is free software; you can redistribute it and/or modify it
28209 + * under the terms of the GNU General Public License as published by the Free
28210 + * Software Foundation; either version 2 of the License, or (at your option)
28211 + * any later version.
28212 + *
28213 + */
28214 +#ifndef _CRYPTO_ACOMP_H
28215 +#define _CRYPTO_ACOMP_H
28216 +#include <linux/crypto.h>
28217 +
28218 +#define CRYPTO_ACOMP_ALLOC_OUTPUT      0x00000001
28219 +
28220 +/**
28221 + * struct acomp_req - asynchronous (de)compression request
28222 + *
28223 + * @base:      Common attributes for asynchronous crypto requests
28224 + * @src:       Source data
28225 + * @dst:       Destination data
28226 + * @slen:      Size of the input buffer
28227 + * @dlen:      Size of the output buffer and number of bytes produced
28228 + * @flags:     Internal flags
28229 + * @__ctx:     Start of private context data
28230 + */
28231 +struct acomp_req {
28232 +       struct crypto_async_request base;
28233 +       struct scatterlist *src;
28234 +       struct scatterlist *dst;
28235 +       unsigned int slen;
28236 +       unsigned int dlen;
28237 +       u32 flags;
28238 +       void *__ctx[] CRYPTO_MINALIGN_ATTR;
28239 +};
28240 +
28241 +/**
28242 + * struct crypto_acomp - user-instantiated objects which encapsulate
28243 + * algorithms and core processing logic
28244 + *
28245 + * @compress:          Function performs a compress operation
28246 + * @decompress:                Function performs a de-compress operation
28247 + * @dst_free:          Frees destination buffer if allocated inside the
28248 + *                     algorithm
28249 + * @reqsize:           Context size for (de)compression requests
28250 + * @base:              Common crypto API algorithm data structure
28251 + */
28252 +struct crypto_acomp {
28253 +       int (*compress)(struct acomp_req *req);
28254 +       int (*decompress)(struct acomp_req *req);
28255 +       void (*dst_free)(struct scatterlist *dst);
28256 +       unsigned int reqsize;
28257 +       struct crypto_tfm base;
28258 +};
28259 +
28260 +/**
28261 + * struct acomp_alg - asynchronous compression algorithm
28262 + *
28263 + * @compress:  Function performs a compress operation
28264 + * @decompress:        Function performs a de-compress operation
28265 + * @dst_free:  Frees destination buffer if allocated inside the algorithm
28266 + * @init:      Initialize the cryptographic transformation object.
28267 + *             This function is used to initialize the cryptographic
28268 + *             transformation object. This function is called only once at
28269 + *             the instantiation time, right after the transformation context
28270 + *             was allocated. In case the cryptographic hardware has some
28271 + *             special requirements which need to be handled by software, this
28272 + *             function shall check for the precise requirement of the
28273 + *             transformation and put any software fallbacks in place.
28274 + * @exit:      Deinitialize the cryptographic transformation object. This is a
28275 + *             counterpart to @init, used to remove various changes set in
28276 + *             @init.
28277 + *
28278 + * @reqsize:   Context size for (de)compression requests
28279 + * @base:      Common crypto API algorithm data structure
28280 + */
28281 +struct acomp_alg {
28282 +       int (*compress)(struct acomp_req *req);
28283 +       int (*decompress)(struct acomp_req *req);
28284 +       void (*dst_free)(struct scatterlist *dst);
28285 +       int (*init)(struct crypto_acomp *tfm);
28286 +       void (*exit)(struct crypto_acomp *tfm);
28287 +       unsigned int reqsize;
28288 +       struct crypto_alg base;
28289 +};
28290 +
28291 +/**
28292 + * DOC: Asynchronous Compression API
28293 + *
28294 + * The Asynchronous Compression API is used with the algorithms of type
28295 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
28296 + */
28297 +
28298 +/**
28299 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
28300 + * @alg_name:  is the cra_name / name or cra_driver_name / driver name of the
28301 + *             compression algorithm e.g. "deflate"
28302 + * @type:      specifies the type of the algorithm
28303 + * @mask:      specifies the mask for the algorithm
28304 + *
28305 + * Allocate a handle for a compression algorithm. The returned struct
28306 + * crypto_acomp is the handle that is required for any subsequent
28307 + * API invocation for the compression operations.
28308 + *
28309 + * Return:     allocated handle in case of success; IS_ERR() is true in case
28310 + *             of an error, PTR_ERR() returns the error code.
28311 + */
28312 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
28313 +                                       u32 mask);
28314 +
28315 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
28316 +{
28317 +       return &tfm->base;
28318 +}
28319 +
28320 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
28321 +{
28322 +       return container_of(alg, struct acomp_alg, base);
28323 +}
28324 +
28325 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
28326 +{
28327 +       return container_of(tfm, struct crypto_acomp, base);
28328 +}
28329 +
28330 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
28331 +{
28332 +       return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
28333 +}
28334 +
28335 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
28336 +{
28337 +       return tfm->reqsize;
28338 +}
28339 +
28340 +static inline void acomp_request_set_tfm(struct acomp_req *req,
28341 +                                        struct crypto_acomp *tfm)
28342 +{
28343 +       req->base.tfm = crypto_acomp_tfm(tfm);
28344 +}
28345 +
28346 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
28347 +{
28348 +       return __crypto_acomp_tfm(req->base.tfm);
28349 +}
28350 +
28351 +/**
28352 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
28353 + *
28354 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28355 + */
28356 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
28357 +{
28358 +       crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
28359 +}
28360 +
28361 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
28362 +{
28363 +       type &= ~CRYPTO_ALG_TYPE_MASK;
28364 +       type |= CRYPTO_ALG_TYPE_ACOMPRESS;
28365 +       mask |= CRYPTO_ALG_TYPE_MASK;
28366 +
28367 +       return crypto_has_alg(alg_name, type, mask);
28368 +}
28369 +
28370 +/**
28371 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
28372 + *
28373 + * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28374 + *
28375 + * Return:     allocated handle in case of success or NULL in case of an error
28376 + */
28377 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
28378 +
28379 +/**
28380 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
28381 + *                        request as well as the output buffer if allocated
28382 + *                        inside the algorithm
28383 + *
28384 + * @req:       request to free
28385 + */
28386 +void acomp_request_free(struct acomp_req *req);
28387 +
28388 +/**
28389 + * acomp_request_set_callback() -- Sets an asynchronous callback
28390 + *
28391 + * Callback will be called when an asynchronous operation on a given
28392 + * request is finished.
28393 + *
28394 + * @req:       request that the callback will be set for
28395 + * @flgs:      specify for instance if the operation may backlog
28396 + * @cmpl:      callback which will be called
28397 + * @data:      private data used by the caller
28398 + */
28399 +static inline void acomp_request_set_callback(struct acomp_req *req,
28400 +                                             u32 flgs,
28401 +                                             crypto_completion_t cmpl,
28402 +                                             void *data)
28403 +{
28404 +       req->base.complete = cmpl;
28405 +       req->base.data = data;
28406 +       req->base.flags = flgs;
28407 +}
28408 +
28409 +/**
28410 + * acomp_request_set_params() -- Sets request parameters
28411 + *
28412 + * Sets parameters required by an acomp operation
28413 + *
28414 + * @req:       asynchronous compress request
28415 + * @src:       pointer to input buffer scatterlist
28416 + * @dst:       pointer to output buffer scatterlist. If this is NULL, the
28417 + *             acomp layer will allocate the output memory
28418 + * @slen:      size of the input buffer
28419 + * @dlen:      size of the output buffer. If dst is NULL, this can be used by
28420 + *             the user to specify the maximum amount of memory to allocate
28421 + */
28422 +static inline void acomp_request_set_params(struct acomp_req *req,
28423 +                                           struct scatterlist *src,
28424 +                                           struct scatterlist *dst,
28425 +                                           unsigned int slen,
28426 +                                           unsigned int dlen)
28427 +{
28428 +       req->src = src;
28429 +       req->dst = dst;
28430 +       req->slen = slen;
28431 +       req->dlen = dlen;
28432 +
28433 +       if (!req->dst)
28434 +               req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
28435 +}
28436 +
28437 +/**
28438 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
28439 + *
28440 + * Function invokes the asynchronous compress operation
28441 + *
28442 + * @req:       asynchronous compress request
28443 + *
28444 + * Return:     zero on success; error code in case of error
28445 + */
28446 +static inline int crypto_acomp_compress(struct acomp_req *req)
28447 +{
28448 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28449 +
28450 +       return tfm->compress(req);
28451 +}
28452 +
28453 +/**
28454 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
28455 + *
28456 + * Function invokes the asynchronous decompress operation
28457 + *
28458 + * @req:       asynchronous compress request
28459 + *
28460 + * Return:     zero on success; error code in case of error
28461 + */
28462 +static inline int crypto_acomp_decompress(struct acomp_req *req)
28463 +{
28464 +       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28465 +
28466 +       return tfm->decompress(req);
28467 +}
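
To tie the API together, here is a minimal, hedged usage sketch (hypothetical
my_deflate()/my_acomp_done() names; the completion-based wait is one common
pattern, and a real caller should also propagate the callback's err value):

/* Hypothetical usage sketch -- not part of the patch. */
static void my_acomp_done(struct crypto_async_request *base, int err)
{
	/* err is ignored here for brevity; real code must record it */
	complete(base->data);	/* ->data set via acomp_request_set_callback() */
}

static int my_deflate(struct scatterlist *src, unsigned int slen,
		      struct scatterlist *dst, unsigned int dlen)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   my_acomp_done, &done);

	ret = crypto_acomp_compress(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* real status comes via the callback's err */
	}

	/* on success, req->dlen holds the number of bytes produced */
	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}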
28468 +
28469 +#endif
28470 --- /dev/null
28471 +++ b/include/crypto/internal/acompress.h
28472 @@ -0,0 +1,81 @@
28473 +/*
28474 + * Asynchronous Compression operations
28475 + *
28476 + * Copyright (c) 2016, Intel Corporation
28477 + * Authors: Weigang Li <weigang.li@intel.com>
28478 + *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28479 + *
28480 + * This program is free software; you can redistribute it and/or modify it
28481 + * under the terms of the GNU General Public License as published by the Free
28482 + * Software Foundation; either version 2 of the License, or (at your option)
28483 + * any later version.
28484 + *
28485 + */
28486 +#ifndef _CRYPTO_ACOMP_INT_H
28487 +#define _CRYPTO_ACOMP_INT_H
28488 +#include <crypto/acompress.h>
28489 +
28490 +/*
28491 + * Transform internal helpers.
28492 + */
28493 +static inline void *acomp_request_ctx(struct acomp_req *req)
28494 +{
28495 +       return req->__ctx;
28496 +}
28497 +
28498 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
28499 +{
28500 +       return tfm->base.__crt_ctx;
28501 +}
28502 +
28503 +static inline void acomp_request_complete(struct acomp_req *req,
28504 +                                         int err)
28505 +{
28506 +       req->base.complete(&req->base, err);
28507 +}
28508 +
28509 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
28510 +{
28511 +       return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
28512 +}
28513 +
28514 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
28515 +{
28516 +       struct acomp_req *req;
28517 +
28518 +       req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
28519 +       if (likely(req))
28520 +               acomp_request_set_tfm(req, tfm);
28521 +       return req;
28522 +}
28523 +
28524 +static inline void __acomp_request_free(struct acomp_req *req)
28525 +{
28526 +       kzfree(req);
28527 +}
28528 +
28529 +/**
28530 + * crypto_register_acomp() -- Register asynchronous compression algorithm
28531 + *
28532 + * Function registers an implementation of an asynchronous
28533 + * compression algorithm
28534 + *
28535 + * @alg:       algorithm definition
28536 + *
28537 + * Return:     zero on success; error code in case of error
28538 + */
28539 +int crypto_register_acomp(struct acomp_alg *alg);
28540 +
28541 +/**
28542 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
28543 + *
28544 + * Function unregisters an implementation of an asynchronous
28545 + * compression algorithm
28546 + *
28547 + * @alg:       algorithm definition
28548 + *
28549 + * Return:     zero on success; error code in case of error
28550 + */
28551 +int crypto_unregister_acomp(struct acomp_alg *alg);
28552 +
28553 +#endif
28554 --- /dev/null
28555 +++ b/include/crypto/internal/scompress.h
28556 @@ -0,0 +1,136 @@
28557 +/*
28558 + * Synchronous Compression operations
28559 + *
28560 + * Copyright 2015 LG Electronics Inc.
28561 + * Copyright (c) 2016, Intel Corporation
28562 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28563 + *
28564 + * This program is free software; you can redistribute it and/or modify it
28565 + * under the terms of the GNU General Public License as published by the Free
28566 + * Software Foundation; either version 2 of the License, or (at your option)
28567 + * any later version.
28568 + *
28569 + */
28570 +#ifndef _CRYPTO_SCOMP_INT_H
28571 +#define _CRYPTO_SCOMP_INT_H
28572 +#include <linux/crypto.h>
28573 +
28574 +#define SCOMP_SCRATCH_SIZE     131072
28575 +
28576 +struct crypto_scomp {
28577 +       struct crypto_tfm base;
28578 +};
28579 +
28580 +/**
28581 + * struct scomp_alg - synchronous compression algorithm
28582 + *
28583 + * @alloc_ctx: Function allocates algorithm specific context
28584 + * @free_ctx:  Function frees context allocated with alloc_ctx
28585 + * @compress:  Function performs a compress operation
28586 + * @decompress:        Function performs a de-compress operation
28587 + * @init:      Initialize the cryptographic transformation object.
28588 + *             This function is used to initialize the cryptographic
28589 + *             transformation object. This function is called only once at
28590 + *             the instantiation time, right after the transformation context
28591 + *             was allocated. In case the cryptographic hardware has some
28592 + *             special requirements which need to be handled by software, this
28593 + *             function shall check for the precise requirement of the
28594 + *             transformation and put any software fallbacks in place.
28595 + * @exit:      Deinitialize the cryptographic transformation object. This is a
28596 + *             counterpart to @init, used to remove various changes set in
28597 + *             @init.
28598 + * @base:      Common crypto API algorithm data structure
28599 + */
28600 +struct scomp_alg {
28601 +       void *(*alloc_ctx)(struct crypto_scomp *tfm);
28602 +       void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
28603 +       int (*compress)(struct crypto_scomp *tfm, const u8 *src,
28604 +                       unsigned int slen, u8 *dst, unsigned int *dlen,
28605 +                       void *ctx);
28606 +       int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
28607 +                         unsigned int slen, u8 *dst, unsigned int *dlen,
28608 +                         void *ctx);
28609 +       struct crypto_alg base;
28610 +};
28611 +
28612 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
28613 +{
28614 +       return container_of(alg, struct scomp_alg, base);
28615 +}
28616 +
28617 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
28618 +{
28619 +       return container_of(tfm, struct crypto_scomp, base);
28620 +}
28621 +
28622 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
28623 +{
28624 +       return &tfm->base;
28625 +}
28626 +
28627 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
28628 +{
28629 +       crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
28630 +}
28631 +
28632 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
28633 +{
28634 +       return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
28635 +}
28636 +
28637 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
28638 +{
28639 +       return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
28640 +}
28641 +
28642 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
28643 +                                        void *ctx)
28644 +{
28645 +       return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
28646 +}
28647 +
28648 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
28649 +                                       const u8 *src, unsigned int slen,
28650 +                                       u8 *dst, unsigned int *dlen, void *ctx)
28651 +{
28652 +       return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
28653 +}
28654 +
28655 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
28656 +                                         const u8 *src, unsigned int slen,
28657 +                                         u8 *dst, unsigned int *dlen,
28658 +                                         void *ctx)
28659 +{
28660 +       return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
28661 +                                                ctx);
28662 +}
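
A brief hedged sketch of the scomp calling convention (hypothetical
src_buf/dst_buf linear buffers and slen length; tfm assumed already allocated):

/* Hypothetical internal-user snippet -- not part of the patch. */
void *ctx = crypto_scomp_alloc_ctx(tfm);
unsigned int dlen = SCOMP_SCRATCH_SIZE;	/* capacity of dst_buf */
int ret;

if (IS_ERR(ctx))
	return PTR_ERR(ctx);

ret = crypto_scomp_compress(tfm, src_buf, slen, dst_buf, &dlen, ctx);
/* on success, dlen now holds the compressed length */
crypto_scomp_free_ctx(tfm, ctx);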
28663 +
28664 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
28665 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
28666 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
28667 +
28668 +/**
28669 + * crypto_register_scomp() -- Register synchronous compression algorithm
28670 + *
28671 + * Function registers an implementation of a synchronous
28672 + * compression algorithm
28673 + *
28674 + * @alg:       algorithm definition
28675 + *
28676 + * Return: zero on success; error code in case of error
28677 + */
28678 +int crypto_register_scomp(struct scomp_alg *alg);
28679 +
28680 +/**
28681 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
28682 + *
28683 + * Function unregisters an implementation of a synchronous
28684 + * compression algorithm
28685 + *
28686 + * @alg:       algorithm definition
28687 + *
28688 + * Return: zero on success; error code in case of error
28689 + */
28690 +int crypto_unregister_scomp(struct scomp_alg *alg);
28691 +
28692 +#endif
28693 --- a/include/linux/crypto.h
28694 +++ b/include/linux/crypto.h
28695 @@ -50,6 +50,8 @@
28696  #define CRYPTO_ALG_TYPE_SKCIPHER       0x00000005
28697  #define CRYPTO_ALG_TYPE_GIVCIPHER      0x00000006
28698  #define CRYPTO_ALG_TYPE_KPP            0x00000008
28699 +#define CRYPTO_ALG_TYPE_ACOMPRESS      0x0000000a
28700 +#define CRYPTO_ALG_TYPE_SCOMPRESS      0x0000000b
28701  #define CRYPTO_ALG_TYPE_RNG            0x0000000c
28702  #define CRYPTO_ALG_TYPE_AKCIPHER       0x0000000d
28703  #define CRYPTO_ALG_TYPE_DIGEST         0x0000000e
28704 @@ -60,6 +62,7 @@
28705  #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
28706  #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000e
28707  #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
28708 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
28709  
28710  #define CRYPTO_ALG_LARVAL              0x00000010
28711  #define CRYPTO_ALG_DEAD                        0x00000020
28712 --- a/include/uapi/linux/cryptouser.h
28713 +++ b/include/uapi/linux/cryptouser.h
28714 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
28715         CRYPTOCFGA_REPORT_CIPHER,       /* struct crypto_report_cipher */
28716         CRYPTOCFGA_REPORT_AKCIPHER,     /* struct crypto_report_akcipher */
28717         CRYPTOCFGA_REPORT_KPP,          /* struct crypto_report_kpp */
28718 +       CRYPTOCFGA_REPORT_ACOMP,        /* struct crypto_report_acomp */
28719         __CRYPTOCFGA_MAX
28720  
28721  #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
28722 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
28723         char type[CRYPTO_MAX_NAME];
28724  };
28725  
28726 +struct crypto_report_acomp {
28727 +       char type[CRYPTO_MAX_NAME];
28728 +};
28729 +
28730  #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
28731                                sizeof(struct crypto_report_blkcipher))
28732 --- a/scripts/spelling.txt
28733 +++ b/scripts/spelling.txt
28734 @@ -305,6 +305,9 @@ defintion||definition
28735  defintions||definitions
28736  defualt||default
28737  defult||default
28738 +deintializing||deinitializing
28739 +deintialize||deinitialize
28740 +deintialized||deinitialized
28741  deivce||device
28742  delared||declared
28743  delare||declare
28744 --- a/sound/soc/amd/acp-pcm-dma.c
28745 +++ b/sound/soc/amd/acp-pcm-dma.c
28746 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
28747         return 0;
28748  }
28749  
28750 -/* Deintialize ACP */
28751 +/* Deinitialize ACP */
28752  static int acp_deinit(void __iomem *acp_mmio)
28753  {
28754         u32 val;