oweals/openwrt.git: target/linux/layerscape/patches-5.4/804-crypto-0028-crypto-caam-qi-add-support-for-TLS-1.0-record.patch
1 From 47824cc9417946b49b80e88c74ab5ee69eacc2a7 Mon Sep 17 00:00:00 2001
2 From: Radu Alexe <radu.alexe@nxp.com>
3 Date: Thu, 25 May 2017 15:51:50 +0300
4 Subject: [PATCH] crypto: caam/qi - add support for TLS 1.0 record
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 TLS 1.0 descriptors run on SEC 4.x or higher.
10 For now, only tls10(hmac(sha1),cbc(aes)) algorithm
11 is registered by the driver.
13 Known limitations:
14  - when src == dst - there should be no element in the src scatterlist array
15    that contains both associated data and message data.
16  - when src != dst - associated data is not copied from source into
17    destination.
18  - for decryption when src != dst the size of the destination should be
19    large enough so that the buffer may contain the decrypted authenc and
20    padded data.
21
22 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
23 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
24 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
25 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
26 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
27 ---
28  drivers/crypto/caam/caamalg_desc.c | 414 ++++++++++++++++++++++++++++++++
29  drivers/crypto/caam/caamalg_desc.h |  13 +
30  drivers/crypto/caam/caamalg_qi.c   | 478 +++++++++++++++++++++++++++++++++++++
31  drivers/crypto/caam/desc.h         |  27 +++
32  4 files changed, 932 insertions(+)
33
34 --- a/drivers/crypto/caam/caamalg_desc.c
35 +++ b/drivers/crypto/caam/caamalg_desc.c
36 @@ -622,6 +622,420 @@ copy_iv:
37  EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
38  
39  /**
40 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
41 + * @desc: pointer to buffer used for descriptor construction
42 + * @cdata: pointer to block cipher transform definitions
43 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
44 + *         with OP_ALG_AAI_CBC
45 + * @adata: pointer to authentication transform definitions.
46 + *         A split key is required for SEC Era < 6; the size of the split key
47 + *         is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
48 + *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
49 + * @assoclen: associated data length
50 + * @ivsize: initialization vector size
51 + * @authsize: authentication data size
52 + * @blocksize: block cipher size
53 + * @era: SEC Era
54 + */
55 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
56 +                          struct alginfo *adata, unsigned int assoclen,
57 +                          unsigned int ivsize, unsigned int authsize,
58 +                          unsigned int blocksize, int era)
59 +{
60 +       u32 *key_jump_cmd, *zero_payload_jump_cmd;
61 +       u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
62 +
63 +       /*
64 +        * Compute the index (in bytes) for the LOAD with destination of
65 +        * Class 1 Data Size Register and for the LOAD that generates padding
66 +        */
67 +       if (adata->key_inline) {
68 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
69 +                               cdata->keylen - 4 * CAAM_CMD_SZ;
70 +               idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
71 +                            cdata->keylen - 2 * CAAM_CMD_SZ;
72 +       } else {
73 +               idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
74 +                               4 * CAAM_CMD_SZ;
75 +               idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
76 +                            2 * CAAM_CMD_SZ;
77 +       }
78 +
79 +       stidx = 1 << HDR_START_IDX_SHIFT;
80 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
81 +
82 +       /* skip key loading if they are loaded due to sharing */
83 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
84 +                                  JUMP_COND_SHRD);
85 +
86 +       if (era < 6) {
87 +               if (adata->key_inline)
88 +                       append_key_as_imm(desc, adata->key_virt,
89 +                                         adata->keylen_pad, adata->keylen,
90 +                                         CLASS_2 | KEY_DEST_MDHA_SPLIT |
91 +                                         KEY_ENC);
92 +               else
93 +                       append_key(desc, adata->key_dma, adata->keylen,
94 +                                  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
95 +       } else {
96 +               append_proto_dkp(desc, adata);
97 +       }
98 +
99 +       if (cdata->key_inline)
100 +               append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
101 +                                 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
102 +       else
103 +               append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
104 +                          KEY_DEST_CLASS_REG);
105 +
106 +       set_jump_tgt_here(desc, key_jump_cmd);
107 +
108 +       /* class 2 operation */
109 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
110 +                        OP_ALG_ENCRYPT);
111 +       /* class 1 operation */
112 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
113 +                        OP_ALG_ENCRYPT);
114 +
115 +       /* payloadlen = input data length - (assoclen + ivlen) */
116 +       append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
117 +
118 +       /* math1 = payloadlen + icvlen */
119 +       append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
120 +
121 +       /* padlen = block_size - math1 % block_size */
122 +       append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
123 +       append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
124 +
125 +       /* cryptlen = payloadlen + icvlen + padlen */
126 +       append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
127 +
128 +       /*
129 +        * update immediate data with the padding length value
130 +        * for the LOAD in the class 1 data size register.
131 +        */
132 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
133 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
134 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
135 +                       (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
136 +
137 +       /* overwrite PL field for the padding iNFO FIFO entry  */
138 +       append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
139 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
140 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
141 +                       (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
142 +
143 +       /* store encrypted payload, icv and padding */
144 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
145 +
146 +       /* if payload length is zero, jump to zero-payload commands */
147 +       append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
148 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
149 +                                           JUMP_COND_MATH_Z);
150 +
151 +       /* load iv in context1 */
152 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
153 +                  LDST_CLASS_1_CCB | ivsize);
154 +
155 +       /* read assoc for authentication */
156 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
157 +                            FIFOLD_TYPE_MSG);
158 +       /* insnoop payload */
159 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
160 +                            FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
161 +
162 +       /* jump the zero-payload commands */
163 +       append_jump(desc, JUMP_TEST_ALL | 3);
164 +
165 +       /* zero-payload commands */
166 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
167 +
168 +       /* load iv in context1 */
169 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
170 +                  LDST_CLASS_1_CCB | ivsize);
171 +
172 +       /* assoc data is the only data for authentication */
173 +       append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
174 +                            FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
175 +
176 +       /* send icv to encryption */
177 +       append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
178 +                   authsize);
179 +
180 +       /* update class 1 data size register with padding length */
181 +       append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
182 +                           LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
183 +
184 +       /* generate padding and send it to encryption */
185 +       genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
186 +             NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
187 +       append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
188 +                           LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
189 +
190 +#ifdef DEBUG
191 +       print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
192 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
193 +                      desc_bytes(desc), 1);
194 +#endif
195 +}
196 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
197 +
198 +/**
199 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
200 + * @desc: pointer to buffer used for descriptor construction
201 + * @cdata: pointer to block cipher transform definitions
202 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
203 + *         with OP_ALG_AAI_CBC
204 + * @adata: pointer to authentication transform definitions.
205 + *         A split key is required for SEC Era < 6; the size of the split key
206 + *         is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
207 + *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
208 + * @assoclen: associated data length
209 + * @ivsize: initialization vector size
210 + * @authsize: authentication data size
211 + * @blocksize: block cipher size
212 + * @era: SEC Era
213 + */
214 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
215 +                          struct alginfo *adata, unsigned int assoclen,
216 +                          unsigned int ivsize, unsigned int authsize,
217 +                          unsigned int blocksize, int era)
218 +{
219 +       u32 stidx, jumpback;
220 +       u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
221 +       /*
222 +        * Pointer Size bool determines the size of address pointers.
223 +        * false - Pointers fit in one 32-bit word.
224 +        * true - Pointers fit in two 32-bit words.
225 +        */
226 +       bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
227 +
228 +       stidx = 1 << HDR_START_IDX_SHIFT;
229 +       init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
230 +
231 +       /* skip key loading if they are loaded due to sharing */
232 +       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
233 +                                  JUMP_COND_SHRD);
234 +
235 +       if (era < 6)
236 +               append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
237 +                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
238 +       else
239 +               append_proto_dkp(desc, adata);
240 +
241 +       append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
242 +                  KEY_DEST_CLASS_REG);
243 +
244 +       set_jump_tgt_here(desc, key_jump_cmd);
245 +
246 +       /* class 2 operation */
247 +       append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
248 +                        OP_ALG_DECRYPT | OP_ALG_ICV_ON);
249 +       /* class 1 operation */
250 +       append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
251 +                        OP_ALG_DECRYPT);
252 +
253 +       /* VSIL = input data length - 2 * block_size */
254 +       append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
255 +                               blocksize);
256 +
257 +       /*
258 +        * payloadlen + icvlen + padlen = input data length - (assoclen +
259 +        * ivsize)
260 +        */
261 +       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
262 +
263 +       /* skip data to the last but one cipher block */
264 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
265 +
266 +       /* load iv for the last cipher block */
267 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
268 +                  LDST_CLASS_1_CCB | ivsize);
269 +
270 +       /* read last cipher block */
271 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
272 +                            FIFOLD_TYPE_LAST1 | blocksize);
273 +
274 +       /* move decrypted block into math0 and math1 */
275 +       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
276 +                   blocksize);
277 +
278 +       /* reset AES CHA */
279 +       append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
280 +                           LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
281 +
282 +       /* rewind input sequence */
283 +       append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
284 +
285 +       /* key1 is in decryption form */
286 +       append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
287 +                        OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
288 +
289 +       /* load iv in context1 */
290 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
291 +                  LDST_SRCDST_WORD_CLASS_CTX | ivsize);
292 +
293 +       /* read sequence number */
294 +       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
295 +       /* load Type, Version and Len fields in math0 */
296 +       append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
297 +                  LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
298 +
299 +       /* compute (padlen - 1) */
300 +       append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
301 +
302 +       /* math2 = icvlen + (padlen - 1) + 1 */
303 +       append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
304 +
305 +       append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
306 +
307 +       /* VSOL = payloadlen + icvlen + padlen */
308 +       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
309 +
310 +       if (caam_little_end)
311 +               append_moveb(desc, MOVE_WAITCOMP |
312 +                            MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
313 +
314 +       /* update Len field */
315 +       append_math_sub(desc, REG0, REG0, REG2, 8);
316 +
317 +       /* store decrypted payload, icv and padding */
318 +       append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
319 +
320 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
321 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
322 +
323 +       zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
324 +                                           JUMP_COND_MATH_Z);
325 +
326 +       /* send Type, Version and Len(pre ICV) fields to authentication */
327 +       append_move(desc, MOVE_WAITCOMP |
328 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
329 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
330 +
331 +       /* outsnooping payload */
332 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
333 +                            FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
334 +                            FIFOLDST_VLF);
335 +       skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
336 +
337 +       set_jump_tgt_here(desc, zero_payload_jump_cmd);
338 +       /* send Type, Version and Len(pre ICV) fields to authentication */
339 +       append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
340 +                   MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
341 +                   (3 << MOVE_OFFSET_SHIFT) | 5);
342 +
343 +       set_jump_tgt_here(desc, skip_zero_jump_cmd);
344 +       append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
345 +
346 +       /* load icvlen and padlen */
347 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
348 +                            FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
349 +
350 +       /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
351 +       append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
352 +
353 +       /*
354 +        * Start a new input sequence using the SEQ OUT PTR command options,
355 +        * pointer and length used when the current output sequence was defined.
356 +        */
357 +       if (ps) {
358 +               /*
359 +                * Move the lower 32 bits of Shared Descriptor address, the
360 +                * SEQ OUT PTR command, Output Pointer (2 words) and
361 +                * Output Length into math registers.
362 +                */
363 +               if (caam_little_end)
364 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
365 +                                   MOVE_DEST_MATH0 |
366 +                                   (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
367 +               else
368 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
369 +                                   MOVE_DEST_MATH0 |
370 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
371 +
372 +               /* Transform SEQ OUT PTR command in SEQ IN PTR command */
373 +               append_math_and_imm_u32(desc, REG0, REG0, IMM,
374 +                                       ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
375 +               /* Append a JUMP command after the copied fields */
376 +               jumpback = CMD_JUMP | (char)-9;
377 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
378 +                                   LDST_SRCDST_WORD_DECO_MATH2 |
379 +                                   (4 << LDST_OFFSET_SHIFT));
380 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
381 +               /* Move the updated fields back to the Job Descriptor */
382 +               if (caam_little_end)
383 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
384 +                                   MOVE_DEST_DESCBUF |
385 +                                   (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
386 +               else
387 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
388 +                                   MOVE_DEST_DESCBUF |
389 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
390 +
391 +               /*
392 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
393 +                * and then jump back to the next command from the
394 +                * Shared Descriptor.
395 +                */
396 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
397 +       } else {
398 +               /*
399 +                * Move the SEQ OUT PTR command, Output Pointer (1 word) and
400 +                * Output Length into math registers.
401 +                */
402 +               if (caam_little_end)
403 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
404 +                                   MOVE_DEST_MATH0 |
405 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
406 +               else
407 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
408 +                                   MOVE_DEST_MATH0 |
409 +                                   (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
410 +
411 +               /* Transform SEQ OUT PTR command in SEQ IN PTR command */
412 +               append_math_and_imm_u64(desc, REG0, REG0, IMM,
413 +                                       ~(((u64)(CMD_SEQ_IN_PTR ^
414 +                                                CMD_SEQ_OUT_PTR)) << 32));
415 +               /* Append a JUMP command after the copied fields */
416 +               jumpback = CMD_JUMP | (char)-7;
417 +               append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
418 +                                   LDST_SRCDST_WORD_DECO_MATH1 |
419 +                                   (4 << LDST_OFFSET_SHIFT));
420 +               append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
421 +               /* Move the updated fields back to the Job Descriptor */
422 +               if (caam_little_end)
423 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
424 +                                   MOVE_DEST_DESCBUF |
425 +                                   (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
426 +               else
427 +                       append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
428 +                                   MOVE_DEST_DESCBUF |
429 +                                   (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
430 +
431 +               /*
432 +                * Read the new SEQ IN PTR command, Input Pointer, Input Length
433 +                * and then jump back to the next command from the
434 +                * Shared Descriptor.
435 +                */
436 +                append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
437 +       }
438 +
439 +       /* skip payload */
440 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
441 +       /* check icv */
442 +       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
443 +                            FIFOLD_TYPE_LAST2 | authsize);
444 +
445 +#ifdef DEBUG
446 +       print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
447 +                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
448 +                      desc_bytes(desc), 1);
449 +#endif
450 +}
451 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
452 +
453 +/**
454   * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
455   * @desc: pointer to buffer used for descriptor construction
456   * @cdata: pointer to block cipher transform definitions
457 --- a/drivers/crypto/caam/caamalg_desc.h
458 +++ b/drivers/crypto/caam/caamalg_desc.h
459 @@ -17,6 +17,9 @@
460  #define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
461  #define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
462  
463 +#define DESC_TLS_BASE                  (4 * CAAM_CMD_SZ)
464 +#define DESC_TLS10_ENC_LEN             (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
465 +
466  /* Note: Nonce is counted in cdata.keylen */
467  #define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
468  
469 @@ -72,6 +75,16 @@ void cnstr_shdsc_aead_givencap(u32 * con
470                                u32 *nonce, const u32 ctx1_iv_off,
471                                const bool is_qi, int era);
472  
473 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
474 +                          struct alginfo *adata, unsigned int assoclen,
475 +                          unsigned int ivsize, unsigned int authsize,
476 +                          unsigned int blocksize, int era);
477 +
478 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
479 +                          struct alginfo *adata, unsigned int assoclen,
480 +                          unsigned int ivsize, unsigned int authsize,
481 +                          unsigned int blocksize, int era);
482 +
483  void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
484                            unsigned int ivsize, unsigned int icvsize,
485                            const bool is_qi);
486 --- a/drivers/crypto/caam/caamalg_qi.c
487 +++ b/drivers/crypto/caam/caamalg_qi.c
488 @@ -290,6 +290,167 @@ static int des3_aead_setkey(struct crypt
489         return err;
490  }
491  
492 +static int tls_set_sh_desc(struct crypto_aead *tls)
493 +{
494 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
495 +       unsigned int ivsize = crypto_aead_ivsize(tls);
496 +       unsigned int blocksize = crypto_aead_blocksize(tls);
497 +       unsigned int assoclen = 13; /* always 13 bytes for TLS */
498 +       unsigned int data_len[2];
499 +       u32 inl_mask;
500 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
501 +
502 +       if (!ctx->cdata.keylen || !ctx->authsize)
503 +               return 0;
504 +
505 +       /*
506 +        * TLS 1.0 encrypt shared descriptor
507 +        * Job Descriptor and Shared Descriptor
508 +        * must fit into the 64-word Descriptor h/w Buffer
509 +        */
510 +       data_len[0] = ctx->adata.keylen_pad;
511 +       data_len[1] = ctx->cdata.keylen;
512 +
513 +       if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
514 +                             &inl_mask, ARRAY_SIZE(data_len)) < 0)
515 +               return -EINVAL;
516 +
517 +       if (inl_mask & 1)
518 +               ctx->adata.key_virt = ctx->key;
519 +       else
520 +               ctx->adata.key_dma = ctx->key_dma;
521 +
522 +       if (inl_mask & 2)
523 +               ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
524 +       else
525 +               ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
526 +
527 +       ctx->adata.key_inline = !!(inl_mask & 1);
528 +       ctx->cdata.key_inline = !!(inl_mask & 2);
529 +
530 +       cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
531 +                             assoclen, ivsize, ctx->authsize, blocksize,
532 +                             ctrlpriv->era);
533 +
534 +       /*
535 +        * TLS 1.0 decrypt shared descriptor
536 +        * Keys do not fit inline, regardless of algorithms used
537 +        */
538 +       ctx->adata.key_inline = false;
539 +       ctx->adata.key_dma = ctx->key_dma;
540 +       ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
541 +
542 +       cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
543 +                             assoclen, ivsize, ctx->authsize, blocksize,
544 +                             ctrlpriv->era);
545 +
546 +       return 0;
547 +}
548 +
549 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
550 +{
551 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
552 +
553 +       ctx->authsize = authsize;
554 +       tls_set_sh_desc(tls);
555 +
556 +       return 0;
557 +}
558 +
559 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
560 +                     unsigned int keylen)
561 +{
562 +       struct caam_ctx *ctx = crypto_aead_ctx(tls);
563 +       struct device *jrdev = ctx->jrdev;
564 +       struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
565 +       struct crypto_authenc_keys keys;
566 +       int ret = 0;
567 +
568 +       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
569 +               goto badkey;
570 +
571 +#ifdef DEBUG
572 +       dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
573 +               keys.authkeylen + keys.enckeylen, keys.enckeylen,
574 +               keys.authkeylen);
575 +       print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
576 +                      DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
577 +#endif
578 +
579 +       /*
580 +        * If DKP is supported, use it in the shared descriptor to generate
581 +        * the split key.
582 +        */
583 +       if (ctrlpriv->era >= 6) {
584 +               ctx->adata.keylen = keys.authkeylen;
585 +               ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
586 +                                                     OP_ALG_ALGSEL_MASK);
587 +
588 +               if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
589 +                       goto badkey;
590 +
591 +               memcpy(ctx->key, keys.authkey, keys.authkeylen);
592 +               memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
593 +                      keys.enckeylen);
594 +               dma_sync_single_for_device(jrdev, ctx->key_dma,
595 +                                          ctx->adata.keylen_pad +
596 +                                          keys.enckeylen, ctx->dir);
597 +               goto skip_split_key;
598 +       }
599 +
600 +       ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
601 +                           keys.authkeylen, CAAM_MAX_KEY_SIZE -
602 +                           keys.enckeylen);
603 +       if (ret)
604 +               goto badkey;
605 +
606 +       /* postpend encryption key to auth split key */
607 +       memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
608 +       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
609 +                                  keys.enckeylen, ctx->dir);
610 +
611 +#ifdef DEBUG
612 +       dev_err(jrdev, "split keylen %d split keylen padded %d\n",
613 +               ctx->adata.keylen, ctx->adata.keylen_pad);
614 +       print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
615 +                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
616 +                      ctx->adata.keylen_pad + keys.enckeylen, 1);
617 +#endif
618 +
619 +skip_split_key:
620 +       ctx->cdata.keylen = keys.enckeylen;
621 +
622 +       ret = tls_set_sh_desc(tls);
623 +       if (ret)
624 +               goto badkey;
625 +
626 +       /* Now update the driver contexts with the new shared descriptor */
627 +       if (ctx->drv_ctx[ENCRYPT]) {
628 +               ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
629 +                                         ctx->sh_desc_enc);
630 +               if (ret) {
631 +                       dev_err(jrdev, "driver enc context update failed\n");
632 +                       goto badkey;
633 +               }
634 +       }
635 +
636 +       if (ctx->drv_ctx[DECRYPT]) {
637 +               ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
638 +                                         ctx->sh_desc_dec);
639 +               if (ret) {
640 +                       dev_err(jrdev, "driver dec context update failed\n");
641 +                       goto badkey;
642 +               }
643 +       }
644 +
645 +       memzero_explicit(&keys, sizeof(keys));
646 +       return ret;
647 +badkey:
648 +       crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
649 +       memzero_explicit(&keys, sizeof(keys));
650 +       return -EINVAL;
651 +}
652 +
653  static int gcm_set_sh_desc(struct crypto_aead *aead)
654  {
655         struct caam_ctx *ctx = crypto_aead_ctx(aead);
656 @@ -809,6 +970,29 @@ struct aead_edesc {
657  };
658  
659  /*
660 + * tls_edesc - s/w-extended tls descriptor
661 + * @src_nents: number of segments in input scatterlist
662 + * @dst_nents: number of segments in output scatterlist
663 + * @iv_dma: dma address of iv for checking continuity and link table
664 + * @qm_sg_bytes: length of dma mapped h/w link table
665 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
666 + * @qm_sg_dma: bus physical mapped address of h/w link table
667 + * @drv_req: driver-specific request structure
668 + * @sgt: the h/w link table, followed by IV
669 + */
670 +struct tls_edesc {
671 +       int src_nents;
672 +       int dst_nents;
673 +       dma_addr_t iv_dma;
674 +       int qm_sg_bytes;
675 +       dma_addr_t qm_sg_dma;
676 +       struct scatterlist tmp[2];
677 +       struct scatterlist *dst;
678 +       struct caam_drv_req drv_req;
679 +       struct qm_sg_entry sgt[0];
680 +};
681 +
682 +/*
683   * skcipher_edesc - s/w-extended skcipher descriptor
684   * @src_nents: number of segments in input scatterlist
685   * @dst_nents: number of segments in output scatterlist
686 @@ -900,6 +1084,18 @@ static void aead_unmap(struct device *de
687         dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
688  }
689  
690 +static void tls_unmap(struct device *dev,
691 +                     struct tls_edesc *edesc,
692 +                     struct aead_request *req)
693 +{
694 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
695 +       int ivsize = crypto_aead_ivsize(aead);
696 +
697 +       caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
698 +                  edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
699 +                  edesc->qm_sg_dma, edesc->qm_sg_bytes);
700 +}
701 +
702  static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
703                            struct skcipher_request *req)
704  {
705 @@ -1192,6 +1388,243 @@ static int aead_decrypt(struct aead_requ
706         return aead_crypt(req, false);
707  }
708  
709 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
710 +{
711 +       struct device *qidev;
712 +       struct tls_edesc *edesc;
713 +       struct aead_request *aead_req = drv_req->app_ctx;
714 +       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
715 +       struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
716 +       int ecode = 0;
717 +
718 +       qidev = caam_ctx->qidev;
719 +
720 +       if (unlikely(status)) {
721 +               caam_jr_strstatus(qidev, status);
722 +               ecode = -EIO;
723 +       }
724 +
725 +       edesc = container_of(drv_req, typeof(*edesc), drv_req);
726 +       tls_unmap(qidev, edesc, aead_req);
727 +
728 +       aead_request_complete(aead_req, ecode);
729 +       qi_cache_free(edesc);
730 +}
731 +
732 +/*
733 + * allocate and map the tls extended descriptor
734 + */
735 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
736 +{
737 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
738 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
739 +       unsigned int blocksize = crypto_aead_blocksize(aead);
740 +       unsigned int padsize, authsize;
741 +       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
742 +                                                typeof(*alg), aead);
743 +       struct device *qidev = ctx->qidev;
744 +       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
745 +                     GFP_KERNEL : GFP_ATOMIC;
746 +       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
747 +       struct tls_edesc *edesc;
748 +       dma_addr_t qm_sg_dma, iv_dma = 0;
749 +       int ivsize = 0;
750 +       u8 *iv;
751 +       int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
752 +       int in_len, out_len;
753 +       struct qm_sg_entry *sg_table, *fd_sgt;
754 +       struct caam_drv_ctx *drv_ctx;
755 +       struct scatterlist *dst;
756 +
757 +       if (encrypt) {
758 +               padsize = blocksize - ((req->cryptlen + ctx->authsize) %
759 +                                       blocksize);
760 +               authsize = ctx->authsize + padsize;
761 +       } else {
762 +               authsize = ctx->authsize;
763 +       }
764 +
765 +       drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
766 +       if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
767 +               return (struct tls_edesc *)drv_ctx;
768 +
769 +       /* allocate space for base edesc, link tables and IV */
770 +       edesc = qi_cache_alloc(GFP_DMA | flags);
771 +       if (unlikely(!edesc)) {
772 +               dev_err(qidev, "could not allocate extended descriptor\n");
773 +               return ERR_PTR(-ENOMEM);
774 +       }
775 +
776 +       if (likely(req->src == req->dst)) {
777 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
778 +                                            req->cryptlen +
779 +                                            (encrypt ? authsize : 0));
780 +               if (unlikely(src_nents < 0)) {
781 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
782 +                               req->assoclen + req->cryptlen +
783 +                               (encrypt ? authsize : 0));
784 +                       qi_cache_free(edesc);
785 +                       return ERR_PTR(src_nents);
786 +               }
787 +
788 +               mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
789 +                                             DMA_BIDIRECTIONAL);
790 +               if (unlikely(!mapped_src_nents)) {
791 +                       dev_err(qidev, "unable to map source\n");
792 +                       qi_cache_free(edesc);
793 +                       return ERR_PTR(-ENOMEM);
794 +               }
795 +               dst = req->dst;
796 +       } else {
797 +               src_nents = sg_nents_for_len(req->src, req->assoclen +
798 +                                            req->cryptlen);
799 +               if (unlikely(src_nents < 0)) {
800 +                       dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
801 +                               req->assoclen + req->cryptlen);
802 +                       qi_cache_free(edesc);
803 +                       return ERR_PTR(src_nents);
804 +               }
805 +
806 +               dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
807 +               dst_nents = sg_nents_for_len(dst, req->cryptlen +
808 +                                            (encrypt ? authsize : 0));
809 +               if (unlikely(dst_nents < 0)) {
810 +                       dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
811 +                               req->cryptlen +
812 +                               (encrypt ? authsize : 0));
813 +                       qi_cache_free(edesc);
814 +                       return ERR_PTR(dst_nents);
815 +               }
816 +
817 +               if (src_nents) {
818 +                       mapped_src_nents = dma_map_sg(qidev, req->src,
819 +                                                     src_nents, DMA_TO_DEVICE);
820 +                       if (unlikely(!mapped_src_nents)) {
821 +                               dev_err(qidev, "unable to map source\n");
822 +                               qi_cache_free(edesc);
823 +                               return ERR_PTR(-ENOMEM);
824 +                       }
825 +               } else {
826 +                       mapped_src_nents = 0;
827 +               }
828 +
829 +               mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
830 +                                             DMA_FROM_DEVICE);
831 +               if (unlikely(!mapped_dst_nents)) {
832 +                       dev_err(qidev, "unable to map destination\n");
833 +                       dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
834 +                       qi_cache_free(edesc);
835 +                       return ERR_PTR(-ENOMEM);
836 +               }
837 +       }
838 +
839 +       /*
840 +        * Create S/G table: IV, src, dst.
841 +        * Input is not contiguous.
842 +        */
843 +       qm_sg_ents = 1 + mapped_src_nents +
844 +                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
845 +       sg_table = &edesc->sgt[0];
846 +       qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
847 +
848 +       ivsize = crypto_aead_ivsize(aead);
849 +       iv = (u8 *)(sg_table + qm_sg_ents);
850 +       /* Make sure IV is located in a DMAable area */
851 +       memcpy(iv, req->iv, ivsize);
852 +       iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
853 +       if (dma_mapping_error(qidev, iv_dma)) {
854 +               dev_err(qidev, "unable to map IV\n");
855 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
856 +                          DMA_NONE, 0, 0);
857 +               qi_cache_free(edesc);
858 +               return ERR_PTR(-ENOMEM);
859 +       }
860 +
861 +       edesc->src_nents = src_nents;
862 +       edesc->dst_nents = dst_nents;
863 +       edesc->dst = dst;
864 +       edesc->iv_dma = iv_dma;
865 +       edesc->drv_req.app_ctx = req;
866 +       edesc->drv_req.cbk = tls_done;
867 +       edesc->drv_req.drv_ctx = drv_ctx;
868 +
869 +       dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
870 +       qm_sg_index = 1;
871 +
872 +       sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
873 +       qm_sg_index += mapped_src_nents;
874 +
875 +       if (mapped_dst_nents > 1)
876 +               sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
877 +                                qm_sg_index, 0);
878 +
879 +       qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
880 +       if (dma_mapping_error(qidev, qm_sg_dma)) {
881 +               dev_err(qidev, "unable to map S/G table\n");
882 +               caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
883 +                          ivsize, DMA_TO_DEVICE, 0, 0);
884 +               qi_cache_free(edesc);
885 +               return ERR_PTR(-ENOMEM);
886 +       }
887 +
888 +       edesc->qm_sg_dma = qm_sg_dma;
889 +       edesc->qm_sg_bytes = qm_sg_bytes;
890 +
891 +       out_len = req->cryptlen + (encrypt ? authsize : 0);
892 +       in_len = ivsize + req->assoclen + req->cryptlen;
893 +
894 +       fd_sgt = &edesc->drv_req.fd_sgt[0];
895 +
896 +       dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
897 +
898 +       if (req->dst == req->src)
899 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
900 +                                   (sg_nents_for_len(req->src, req->assoclen) +
901 +                                    1) * sizeof(*sg_table), out_len, 0);
902 +       else if (mapped_dst_nents == 1)
903 +               dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
904 +       else
905 +               dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
906 +                                    qm_sg_index, out_len, 0);
907 +
908 +       return edesc;
909 +}
910 +
911 +static int tls_crypt(struct aead_request *req, bool encrypt)
912 +{
913 +       struct tls_edesc *edesc;
914 +       struct crypto_aead *aead = crypto_aead_reqtfm(req);
915 +       struct caam_ctx *ctx = crypto_aead_ctx(aead);
916 +       int ret;
917 +
918 +       if (unlikely(caam_congested))
919 +               return -EAGAIN;
920 +
921 +       edesc = tls_edesc_alloc(req, encrypt);
922 +       if (IS_ERR_OR_NULL(edesc))
923 +               return PTR_ERR(edesc);
924 +
925 +       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
926 +       if (!ret) {
927 +               ret = -EINPROGRESS;
928 +       } else {
929 +               tls_unmap(ctx->qidev, edesc, req);
930 +               qi_cache_free(edesc);
931 +       }
932 +
933 +       return ret;
934 +}
935 +
936 +static int tls_encrypt(struct aead_request *req)
937 +{
938 +       return tls_crypt(req, true);
939 +}
940 +
941 +static int tls_decrypt(struct aead_request *req)
942 +{
943 +       return tls_crypt(req, false);
944 +}
945 +
946  static int ipsec_gcm_encrypt(struct aead_request *req)
947  {
948         return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
949 @@ -2411,6 +2844,26 @@ static struct caam_aead_alg driver_aeads
950                         .geniv = true,
951                 }
952         },
953 +       {
954 +               .aead = {
955 +                       .base = {
956 +                               .cra_name = "tls10(hmac(sha1),cbc(aes))",
957 +                               .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
958 +                               .cra_blocksize = AES_BLOCK_SIZE,
959 +                       },
960 +                       .setkey = tls_setkey,
961 +                       .setauthsize = tls_setauthsize,
962 +                       .encrypt = tls_encrypt,
963 +                       .decrypt = tls_decrypt,
964 +                       .ivsize = AES_BLOCK_SIZE,
965 +                       .maxauthsize = SHA1_DIGEST_SIZE,
966 +               },
967 +               .caam = {
968 +                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
969 +                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
970 +                                          OP_ALG_AAI_HMAC_PRECOMP,
971 +               }
972 +       }
973  };
974  
975  static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
976 @@ -2418,6 +2871,16 @@ static int caam_init_common(struct caam_
977  {
978         struct caam_drv_private *priv;
979         struct device *dev;
980 +       /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
981 +       static const u8 digest_size[] = {
982 +               MD5_DIGEST_SIZE,
983 +               SHA1_DIGEST_SIZE,
984 +               SHA224_DIGEST_SIZE,
985 +               SHA256_DIGEST_SIZE,
986 +               SHA384_DIGEST_SIZE,
987 +               SHA512_DIGEST_SIZE
988 +       };
989 +       u8 op_id;
990  
991         /*
992          * distribute tfms across job rings to ensure in-order
993 @@ -2449,6 +2912,21 @@ static int caam_init_common(struct caam_
994         ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
995  
996         ctx->qidev = dev;
997 +       if (ctx->adata.algtype) {
998 +               op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
999 +                               >> OP_ALG_ALGSEL_SHIFT;
1000 +               if (op_id < ARRAY_SIZE(digest_size)) {
1001 +                       ctx->authsize = digest_size[op_id];
1002 +               } else {
1003 +                       dev_err(ctx->jrdev,
1004 +                               "incorrect op_id %d; must be less than %zu\n",
1005 +                               op_id, ARRAY_SIZE(digest_size));
1006 +                       caam_jr_free(ctx->jrdev);
1007 +                       return -EINVAL;
1008 +               }
1009 +       } else {
1010 +               ctx->authsize = 0;
1011 +       }
1012  
1013         spin_lock_init(&ctx->lock);
1014         ctx->drv_ctx[ENCRYPT] = NULL;
1015 --- a/drivers/crypto/caam/desc.h
1016 +++ b/drivers/crypto/caam/desc.h
1017 @@ -1704,4 +1704,31 @@
1018  /* Frame Descriptor Command for Replacement Job Descriptor */
1019  #define FD_CMD_REPLACE_JOB_DESC                                0x20000000
1020  
1021 +/* CHA Control Register bits */
1022 +#define CCTRL_RESET_CHA_ALL          0x1
1023 +#define CCTRL_RESET_CHA_AESA         0x2
1024 +#define CCTRL_RESET_CHA_DESA         0x4
1025 +#define CCTRL_RESET_CHA_AFHA         0x8
1026 +#define CCTRL_RESET_CHA_KFHA         0x10
1027 +#define CCTRL_RESET_CHA_SF8A         0x20
1028 +#define CCTRL_RESET_CHA_PKHA         0x40
1029 +#define CCTRL_RESET_CHA_MDHA         0x80
1030 +#define CCTRL_RESET_CHA_CRCA         0x100
1031 +#define CCTRL_RESET_CHA_RNG          0x200
1032 +#define CCTRL_RESET_CHA_SF9A         0x400
1033 +#define CCTRL_RESET_CHA_ZUCE         0x800
1034 +#define CCTRL_RESET_CHA_ZUCA         0x1000
1035 +#define CCTRL_UNLOAD_PK_A0           0x10000
1036 +#define CCTRL_UNLOAD_PK_A1           0x20000
1037 +#define CCTRL_UNLOAD_PK_A2           0x40000
1038 +#define CCTRL_UNLOAD_PK_A3           0x80000
1039 +#define CCTRL_UNLOAD_PK_B0           0x100000
1040 +#define CCTRL_UNLOAD_PK_B1           0x200000
1041 +#define CCTRL_UNLOAD_PK_B2           0x400000
1042 +#define CCTRL_UNLOAD_PK_B3           0x800000
1043 +#define CCTRL_UNLOAD_PK_N            0x1000000
1044 +#define CCTRL_UNLOAD_PK_A            0x4000000
1045 +#define CCTRL_UNLOAD_PK_B            0x8000000
1046 +#define CCTRL_UNLOAD_SBOX            0x10000000
1047 +
1048  #endif /* DESC_H */
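
For reference, below is a minimal kernel-side sketch of how the tls10(hmac(sha1),cbc(aes)) AEAD registered by this patch might be driven through the generic crypto API. It is not part of the patch: the function name is hypothetical, the synchronous wait and buffer sizing are assumptions, and the key blob layout simply mirrors what tls_setkey() parses via crypto_authenc_extractkeys(). The split scatterlist follows the src == dst limitation from the commit message.

/*
 * Illustrative sketch only, not part of the patch: one plausible way to
 * drive the "tls10(hmac(sha1),cbc(aes))" AEAD registered above through
 * the in-kernel crypto API.
 */
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/sha.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int tls10_encrypt_one_record(const u8 *authkey, unsigned int authkeylen,
				    const u8 *enckey, unsigned int enckeylen,
				    u8 *assoc, u8 *payload,
				    unsigned int cryptlen,
				    unsigned int payload_room, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_authenc_key_param *param;
	struct scatterlist sg[2];
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct rtattr *rta;
	unsigned int keylen;
	u8 *key;
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * Pack the key the way authenc-style setkey expects:
	 * rtattr(enckeylen) || authkey || enckey
	 */
	keylen = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	rta = (struct rtattr *)key;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	memcpy(key + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(key + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (err)
		goto out_free_key;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_key;
	}

	/*
	 * Keep the 13-byte TLS pseudo-header and the payload in separate
	 * scatterlist entries: the commit message warns that, for in-place
	 * operation, no single entry may mix associated data and message
	 * data. payload_room must leave space for cryptlen plus the ICV and
	 * up to one block of CBC padding; iv points at a 16-byte explicit IV.
	 */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], assoc, 13);
	sg_set_buf(&sg[1], payload, payload_room);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 13);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_key:
	kzfree(key);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

Decryption would be set up the same way around crypto_aead_decrypt(); per the limitations listed in the commit message, when src != dst the destination scatterlist must be sized to hold the decrypted payload plus the ICV and padding.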