// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex protecting the reference count of algorithm providers
 * (one reference per active device).
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

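/*
 * Fill a buffer descriptor (BD) template with the cipher algorithm, mode
 * and key length for @alg, plus the DMA address of the key buffer.
 * Per-request fields are filled in later from this template.
 */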
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

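/*
 * Build the hardware scatter-gather list for @sgl.  Blocks of up to
 * SEC_MAX_SGE_NUM entries are allocated from the device's DMA pool and
 * chained together via their next/next_sgl fields.  On success *sec_sgl
 * and *psec_sgl hold the CPU and DMA addresses of the head of the chain.
 */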
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	/*
	 * The final block may be exactly full, in which case it holds
	 * SEC_MAX_SGE_NUM entries rather than zero.
	 */
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM ?:
					SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	/*
	 * Unwind the part-built chain, freeing each element with the DMA
	 * handle it was actually allocated at: the head's handle is in
	 * *psec_sgl and each element records its successor's handle in
	 * next_sgl (mirrors sec_free_hw_sgl() below).
	 */
	sgl_current = *sec_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;
		dma_pool_free(info->hw_sgl_pool, sgl_current, *psec_sgl);
		sgl_current = sgl_next;
		*psec_sgl = sgl_next_dma;
	}
	*psec_sgl = 0;

	return ret;
}

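/*
 * Free a hardware scatter-gather chain built by
 * sec_alloc_and_fill_hw_sgl().  Each element is returned to the DMA pool
 * with its own DMA handle: the head's handle is passed in as @psec_sgl
 * and each element records its successor's handle in next_sgl.
 */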
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

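/*
 * Common setkey handler.  The key lives in a coherent DMA buffer so the
 * device can read it directly; it is allocated on first use and zeroed
 * before being overwritten on rekeying.
 */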
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(tfm, key)) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(tfm, key)) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to the hardware queue only under the following
		 * circumstances:
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue,
		 * which is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

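/*
 * Completion handler for a single BD.  Checks the response for errors,
 * saves the IV for chained modes, feeds the hardware queue from the
 * software queue or the backlog, and completes the skcipher request once
 * its last element has been processed.
 */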
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * so it should be able to handle it appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

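/*
 * Requests larger than SEC_REQ_LIMIT are split into multiple BDs.
 * Compute the number of steps and the size of each.
 */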
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitably sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

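/*
 * DMA map @sgl and split the mapped list into @steps scatterlists of
 * split_sizes[i] bytes each, recording the number of entries of each
 * split in *splits_nents.
 */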
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* Split the mapped scatterlist into steps pieces of split_sizes bytes */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

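/*
 * Allocate a request element for one BD: copy the template, set the
 * cipher direction and granularity size fields, and build the hardware
 * SGLs for the input and (if different) output buffers.
 */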
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

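/*
 * Submit one skcipher request: split the source (and destination if
 * different) into SEC_REQ_LIMIT sized elements, map the IV, then queue
 * all elements atomically - either to the hardware/software queues or,
 * if those are full and the request may backlog, to the backlog list.
 */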
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer,
	 * but in the case where we know there is no chaining we can.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog.  If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) <= steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

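/*
 * Variant of init for chained modes (CBC/CTR): these need a 512-entry
 * software queue so that elements with IV dependencies complete in
 * order.
 */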
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

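/*
 * Registration is reference counted across devices: the algorithms are
 * registered when the first device comes up and unregistered when the
 * last one goes away.
 */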
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}