target/linux/layerscape/patches-5.4/804-crypto-0029-crypto-caam-qi2-add-support-for-TLS-1.0-record.patch
From a97159ba48984bfec10abcea3a0215cf3deff153 Mon Sep 17 00:00:00 2001
From: Radu Alexe <radu.alexe@nxp.com>
Date: Fri, 9 Jun 2017 14:49:17 +0300
Subject: [PATCH] crypto: caam/qi2 - add support for TLS 1.0 record
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

TLS 1.0 descriptors run on SEC 4.x or higher. For now, only the
tls10(hmac(sha1),cbc(aes)) algorithm is registered by the driver.

Known limitations:
 - when src == dst, no element in the src scatterlist array may contain
   both associated data and message data.
 - when src != dst, associated data is not copied from source into
   destination.
 - for decryption when src != dst, the destination must be large enough
   to hold the decrypted payload together with the authentication data
   and padding.

Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
---
 drivers/crypto/caam/caamalg_qi2.c | 450 ++++++++++++++++++++++++++++++++++++++
 drivers/crypto/caam/caamalg_qi2.h |  22 ++
 2 files changed, 472 insertions(+)

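As a rough illustration of how the encrypt path sizes a record: tls_edesc_alloc()
pads the payload plus HMAC up to a whole number of cipher blocks and programs the
resulting length into the output frame list entry. The helper below is a
hypothetical sketch of that arithmetic only, not part of the driver; the name
tls10_enc_outlen() is made up for illustration.

/*
 * Hypothetical sketch (not part of this patch): the TLS 1.0 record sizing
 * used by the encrypt path in tls_edesc_alloc().  CBC padding is always at
 * least one byte and at most a full block, so payload + HMAC + padding is
 * rounded up to a multiple of the cipher block size.
 */
static unsigned int tls10_enc_outlen(unsigned int cryptlen,   /* payload bytes */
				     unsigned int authsize,   /* HMAC size, 20 for SHA-1 */
				     unsigned int blocksize)  /* cipher block, 16 for AES */
{
	unsigned int padsize = blocksize - ((cryptlen + authsize) % blocksize);

	/* same value as out_len = req->cryptlen + authsize with padding folded in */
	return cryptlen + authsize + padsize;
}

For tls10(hmac(sha1),cbc(aes)) (authsize 20, blocksize 16) a 100-byte payload thus
becomes 100 + 20 + 8 = 128 bytes of ciphertext. A caller reaches this path by
allocating the AEAD named "tls10(hmac(sha1),cbc(aes))" and supplying the MAC and
cipher keys in the packed authenc key format that crypto_authenc_extractkeys()
parses in tls_setkey().
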
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -583,6 +583,257 @@ skip_out_fle:
        return edesc;
 }
 
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
+                                        bool encrypt)
+{
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       unsigned int blocksize = crypto_aead_blocksize(tls);
+       unsigned int padsize, authsize;
+       struct caam_request *req_ctx = aead_request_ctx(req);
+       struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+       struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
+                                                typeof(*alg), aead);
+       struct device *dev = ctx->dev;
+       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                     GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       struct tls_edesc *edesc;
+       dma_addr_t qm_sg_dma, iv_dma = 0;
+       int ivsize = 0;
+       u8 *iv;
+       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+       int in_len, out_len;
+       struct dpaa2_sg_entry *sg_table;
+       struct scatterlist *dst;
+
+       if (encrypt) {
+               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
+                                       blocksize);
+               authsize = ctx->authsize + padsize;
+       } else {
+               authsize = ctx->authsize;
+       }
+
+       /* allocate space for base edesc, link tables and IV */
+       edesc = qi_cache_zalloc(GFP_DMA | flags);
+       if (unlikely(!edesc)) {
+               dev_err(dev, "could not allocate extended descriptor\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       if (likely(req->src == req->dst)) {
+               src_nents = sg_nents_for_len(req->src, req->assoclen +
+                                            req->cryptlen +
+                                            (encrypt ? authsize : 0));
+               if (unlikely(src_nents < 0)) {
+                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+                               req->assoclen + req->cryptlen +
+                               (encrypt ? authsize : 0));
+                       qi_cache_free(edesc);
+                       return ERR_PTR(src_nents);
+               }
+
+               mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+                                             DMA_BIDIRECTIONAL);
+               if (unlikely(!mapped_src_nents)) {
+                       dev_err(dev, "unable to map source\n");
+                       qi_cache_free(edesc);
+                       return ERR_PTR(-ENOMEM);
+               }
+               dst = req->dst;
+       } else {
+               src_nents = sg_nents_for_len(req->src, req->assoclen +
+                                            req->cryptlen);
+               if (unlikely(src_nents < 0)) {
+                       dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+                               req->assoclen + req->cryptlen);
+                       qi_cache_free(edesc);
+                       return ERR_PTR(src_nents);
+               }
+
+               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
+               dst_nents = sg_nents_for_len(dst, req->cryptlen +
+                                            (encrypt ? authsize : 0));
+               if (unlikely(dst_nents < 0)) {
+                       dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+                               req->cryptlen +
+                               (encrypt ? authsize : 0));
+                       qi_cache_free(edesc);
+                       return ERR_PTR(dst_nents);
+               }
+
+               if (src_nents) {
+                       mapped_src_nents = dma_map_sg(dev, req->src,
+                                                     src_nents, DMA_TO_DEVICE);
+                       if (unlikely(!mapped_src_nents)) {
+                               dev_err(dev, "unable to map source\n");
+                               qi_cache_free(edesc);
+                               return ERR_PTR(-ENOMEM);
+                       }
+               } else {
+                       mapped_src_nents = 0;
+               }
+
+               mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
+                                             DMA_FROM_DEVICE);
+               if (unlikely(!mapped_dst_nents)) {
+                       dev_err(dev, "unable to map destination\n");
+                       dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+                       qi_cache_free(edesc);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+
+       /*
+        * Create S/G table: IV, src, dst.
+        * Input is not contiguous.
+        */
+       qm_sg_ents = 1 + mapped_src_nents +
+                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+       sg_table = &edesc->sgt[0];
+       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+       ivsize = crypto_aead_ivsize(tls);
+       iv = (u8 *)(sg_table + qm_sg_ents);
+       /* Make sure IV is located in a DMAable area */
+       memcpy(iv, req->iv, ivsize);
+       iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, iv_dma)) {
+               dev_err(dev, "unable to map IV\n");
+               caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
+               DMA_NONE, 0, 0);
+               qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->dst = dst;
+       edesc->iv_dma = iv_dma;
+
+       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+       qm_sg_index = 1;
+
+       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+       qm_sg_index += mapped_src_nents;
+
+       if (mapped_dst_nents > 1)
+               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+                                qm_sg_index, 0);
+
+       qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, qm_sg_dma)) {
+               dev_err(dev, "unable to map S/G table\n");
+               caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
+                          ivsize, DMA_TO_DEVICE, 0, 0);
+               qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       edesc->qm_sg_dma = qm_sg_dma;
+       edesc->qm_sg_bytes = qm_sg_bytes;
+
+       out_len = req->cryptlen + (encrypt ? authsize : 0);
+       in_len = ivsize + req->assoclen + req->cryptlen;
+
+       memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+       dpaa2_fl_set_final(in_fle, true);
+       dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+       dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+       dpaa2_fl_set_len(in_fle, in_len);
+
+       if (req->dst == req->src) {
+               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+               dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+                                 (sg_nents_for_len(req->src, req->assoclen) +
+                                  1) * sizeof(*sg_table));
+       } else if (mapped_dst_nents == 1) {
+               dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+               dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
+       } else {
+               dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+               dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+                                 sizeof(*sg_table));
+       }
+
+       dpaa2_fl_set_len(out_fle, out_len);
+
+       return edesc;
+}
+
+static int tls_set_sh_desc(struct crypto_aead *tls)
+{
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       unsigned int ivsize = crypto_aead_ivsize(tls);
+       unsigned int blocksize = crypto_aead_blocksize(tls);
+       struct device *dev = ctx->dev;
+       struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+       struct caam_flc *flc;
+       u32 *desc;
+       unsigned int assoclen = 13; /* always 13 bytes for TLS */
+       unsigned int data_len[2];
+       u32 inl_mask;
+
+       if (!ctx->cdata.keylen || !ctx->authsize)
+               return 0;
+
+       /*
+        * TLS 1.0 encrypt shared descriptor
+        * Job Descriptor and Shared Descriptor
+        * must fit into the 64-word Descriptor h/w Buffer
+        */
+       data_len[0] = ctx->adata.keylen_pad;
+       data_len[1] = ctx->cdata.keylen;
+
+       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
+               return -EINVAL;
+
+       if (inl_mask & 1)
+               ctx->adata.key_virt = ctx->key;
+       else
+               ctx->adata.key_dma = ctx->key_dma;
+
+       if (inl_mask & 2)
+               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+       else
+               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+       ctx->adata.key_inline = !!(inl_mask & 1);
+       ctx->cdata.key_inline = !!(inl_mask & 2);
+
+       flc = &ctx->flc[ENCRYPT];
+       desc = flc->sh_desc;
+       cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
+                             assoclen, ivsize, ctx->authsize, blocksize,
+                             priv->sec_attr.era);
+       flc->flc[1] = cpu_to_caam32(desc_len(desc));
+       dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+                                  sizeof(flc->flc) + desc_bytes(desc),
+                                  ctx->dir);
+
+       /*
+        * TLS 1.0 decrypt shared descriptor
+        * Keys do not fit inline, regardless of algorithms used
+        */
+       ctx->adata.key_inline = false;
+       ctx->adata.key_dma = ctx->key_dma;
+       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+       flc = &ctx->flc[DECRYPT];
+       desc = flc->sh_desc;
+       cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
+                             ctx->authsize, blocksize, priv->sec_attr.era);
+       flc->flc[1] = cpu_to_caam32(desc_len(desc));
+       dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+                                  sizeof(flc->flc) + desc_bytes(desc),
+                                  ctx->dir);
+
+       return 0;
+}
+
 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -627,6 +878,61 @@ static int chachapoly_setauthsize(struct
        return chachapoly_set_sh_desc(aead);
 }
 
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+                     unsigned int keylen)
+{
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       struct device *dev = ctx->dev;
+       struct crypto_authenc_keys keys;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+               goto badkey;
+
+#ifdef DEBUG
+       dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
+               keys.authkeylen + keys.enckeylen, keys.enckeylen,
+               keys.authkeylen);
+       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+       ctx->adata.keylen = keys.authkeylen;
+       ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+                                             OP_ALG_ALGSEL_MASK);
+
+       if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+               goto badkey;
+
+       memcpy(ctx->key, keys.authkey, keys.authkeylen);
+       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+       dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+                                  keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+                      ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+       ctx->cdata.keylen = keys.enckeylen;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return tls_set_sh_desc(tls);
+badkey:
+       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+}
+
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+       ctx->authsize = authsize;
+       tls_set_sh_desc(tls);
+
+       return 0;
+}
+
 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
                             unsigned int keylen)
 {
@@ -1274,6 +1580,17 @@ static void aead_unmap(struct device *de
        dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
+static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
+                     struct aead_request *req)
+{
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       int ivsize = crypto_aead_ivsize(tls);
+
+       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
+                  edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
+                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
                           struct skcipher_request *req)
 {
@@ -1383,6 +1700,119 @@ static int aead_decrypt(struct aead_requ
        return ret;
 }
 
+static void tls_encrypt_done(void *cbk_ctx, u32 status)
+{
+       struct crypto_async_request *areq = cbk_ctx;
+       struct aead_request *req = container_of(areq, struct aead_request,
+                                               base);
+       struct caam_request *req_ctx = to_caam_req(areq);
+       struct tls_edesc *edesc = req_ctx->edesc;
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       int ecode = 0;
+
+#ifdef DEBUG
+       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+       if (unlikely(status)) {
+               caam_qi2_strstatus(ctx->dev, status);
+               ecode = -EIO;
+       }
+
+       tls_unmap(ctx->dev, edesc, req);
+       qi_cache_free(edesc);
+       aead_request_complete(req, ecode);
+}
+
+static void tls_decrypt_done(void *cbk_ctx, u32 status)
+{
+       struct crypto_async_request *areq = cbk_ctx;
+       struct aead_request *req = container_of(areq, struct aead_request,
+                                               base);
+       struct caam_request *req_ctx = to_caam_req(areq);
+       struct tls_edesc *edesc = req_ctx->edesc;
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       int ecode = 0;
+
+#ifdef DEBUG
+       dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+       if (unlikely(status)) {
+               caam_qi2_strstatus(ctx->dev, status);
+               /*
+                * verify hw auth check passed else return -EBADMSG
+                */
+               if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+                    JRSTA_CCBERR_ERRID_ICVCHK)
+                       ecode = -EBADMSG;
+               else
+                       ecode = -EIO;
+       }
+
+       tls_unmap(ctx->dev, edesc, req);
+       qi_cache_free(edesc);
+       aead_request_complete(req, ecode);
+}
+
+static int tls_encrypt(struct aead_request *req)
+{
+       struct tls_edesc *edesc;
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       struct caam_request *caam_req = aead_request_ctx(req);
+       int ret;
+
+       /* allocate extended descriptor */
+       edesc = tls_edesc_alloc(req, true);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       caam_req->flc = &ctx->flc[ENCRYPT];
+       caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+       caam_req->cbk = tls_encrypt_done;
+       caam_req->ctx = &req->base;
+       caam_req->edesc = edesc;
+       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+       if (ret != -EINPROGRESS &&
+           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+               tls_unmap(ctx->dev, edesc, req);
+               qi_cache_free(edesc);
+       }
+
+       return ret;
+}
+
+static int tls_decrypt(struct aead_request *req)
+{
+       struct tls_edesc *edesc;
+       struct crypto_aead *tls = crypto_aead_reqtfm(req);
+       struct caam_ctx *ctx = crypto_aead_ctx(tls);
+       struct caam_request *caam_req = aead_request_ctx(req);
+       int ret;
+
+       /* allocate extended descriptor */
+       edesc = tls_edesc_alloc(req, false);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       caam_req->flc = &ctx->flc[DECRYPT];
+       caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+       caam_req->cbk = tls_decrypt_done;
+       caam_req->ctx = &req->base;
+       caam_req->edesc = edesc;
+       ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+       if (ret != -EINPROGRESS &&
+           !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+               tls_unmap(ctx->dev, edesc, req);
+               qi_cache_free(edesc);
+       }
+
+       return ret;
+}
+
 static int ipsec_gcm_encrypt(struct aead_request *req)
 {
        return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
@@ -2929,6 +3359,26 @@ static struct caam_aead_alg driver_aeads
                        .geniv = true,
                },
        },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
+                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = tls_setkey,
+                       .setauthsize = tls_setauthsize,
+                       .encrypt = tls_encrypt,
+                       .decrypt = tls_decrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+               },
+       },
 };
 
 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -118,6 +118,28 @@ struct aead_edesc {
 };
 
 /*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
+ * @dst: pointer to output scatterlist, useful for unmapping
+ * @sgt: the h/w link table, followed by IV
+ */
+struct tls_edesc {
+       int src_nents;
+       int dst_nents;
+       dma_addr_t iv_dma;
+       int qm_sg_bytes;
+       dma_addr_t qm_sg_dma;
+       struct scatterlist tmp[2];
+       struct scatterlist *dst;
+       struct dpaa2_sg_entry sgt[0];
+};
+
+/*
  * skcipher_edesc - s/w-extended skcipher descriptor
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist