// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
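
/*
 * Overview of the transfer path used below: the key and IV are written to
 * the SS registers, the mode word is written to SS_CTL to start the engine,
 * and data is then moved in PIO mode by polling the RX/TX FIFOs while the
 * device spinlock is held (no DMA is used by this driver).
 */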

static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0, spaces, v;
	int err = 0;
	unsigned int i, todo;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;
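	/*
	 * This optimized path is only taken (from sun4i_ss_cipher_poll())
	 * when every source and destination SG length is a multiple of 4,
	 * so whole 32-bit words can be pushed with writesl()/readsl().
	 */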
	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);
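	/*
	 * Writing the mode word to SS_CTL enables the engine; from here on
	 * the request is serviced by feeding the RX FIFO and draining the
	 * TX FIFO until all of cryptlen has been processed.
	 */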
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		todo = min(rx_cnt, ileft);
		todo = min_t(size_t, todo, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}
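		/*
		 * SS_FCSR reports how much free space is left in the RX FIFO
		 * and how many words are waiting in the TX FIFO; use it to
		 * size the next writesl()/readsl() bursts.
		 */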
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
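
/*
 * Fall back to the software skcipher allocated at init time. This is used
 * when the request length is not a multiple of the block size, which the
 * hardware cannot handle.
 */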
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
	int err;
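
	/*
	 * The sub-request lives on the stack, so no allocation is needed in
	 * the request path; it simply replays the original request on the
	 * fallback tfm.
	 */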
	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL,
				      NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}

/* Generic function that supports SG entries whose size is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0, spaces, v;
	int err = 0;
	unsigned int i, todo;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;
	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;
	/*
	 * If all the SG entries have a size that is a multiple of 4,
	 * we can use the optimized SS function.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}
	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);
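
	/*
	 * From here on the request is handled word by word: whole 32-bit
	 * words are transferred directly from/to the SG entries, and bytes
	 * that do not form a whole word are staged in the local buf/bufo
	 * buffers.
	 */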
	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
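			/*
			 * Input side: push as many whole words as possible
			 * straight from the current SG; bytes that do not
			 * form a whole word are staged in buf below.
			 */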
			char buf[4 * SS_RX_MAX]; /* buffer for linearizing the SG src */

			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG.
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. After
				 * the copy, once buf holds a multiple of 4
				 * bytes, it must be possible to write it all
				 * in one pass, hence the min() with rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo is expressed in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */

			/*
			 * Read obl bytes into bufo; read as much as possible
			 * in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * what is left in the current SG entry and no
				 * more than what is left in bufo; no need to
				 * test against oleft.
				 */
				todo = min_t(unsigned int,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
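
/*
 * All the request entry points below are identical except for the SS_OP_*,
 * SS_CBC/SS_ECB and SS_ENCRYPTION/SS_DECRYPTION bits that are OR'ed into
 * rctx->mode together with the keymode selected at setkey time.
 */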

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));
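	/*
	 * The per-request context reserved above (struct sun4i_cipher_req_ctx)
	 * holds the SS_CTL mode word selected by the request entry points.
	 */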

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	return 0;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback_tfm);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
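
	/* Give the software fallback the same key and request flags as ours. */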
	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}