linux-libre 5.4.47-gnu: drivers/crypto/nx/nx-aes-xcbc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

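/* per-request state carried in the shash descriptor: the running MAC,
 * the number of buffered bytes, and a partial block saved until it can
 * be processed or finalized */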
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *tfm,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

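	/* the NX XCBC unit is programmed for AES-128 only (see the init
	 * routine below), so reject any other key length */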
	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;
	int len;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	len = sizeof(keys);
	/* Generate K1 and K3 encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

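	/* the output sg also covers keys[], so K1 and K3 replace the
	 * 0x01/0x03 patterns in place */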
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

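	/* the PFO hcall interface treats a negative inlen/outlen as "this
	 * address points to a scatter/gather list of that many bytes",
	 * hence the reversed pointer subtraction here */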
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys[1])) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}

static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int err;

	err = nx_crypto_ctx_aes_xcbc_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

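	/* only 128-bit keys are accepted (see nx_xcbc_set_key), so the
	 * key size can be programmed once here */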
	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	struct nx_sg *out_sg;
	u32 to_process = 0, leftover, total;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 *  1: <= AES_BLOCK_SIZE: copy into state, return 0
	 *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
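	/* cap the sg list at the smallest of the driver limit, the device
	 * limit, and the entry count that keeps a worst case of one page
	 * per entry within the device's per-op byte budget */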
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
				nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, nx_ctx->ap->sglen);

	if (data_len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	do {
		to_process = total - to_process;
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			data_len = sctx->count;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 &data_len,
						 max_sg_len);
			if (data_len != sctx->count) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - sctx->count;
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &data_len,
					 max_sg_len);

		if (data_len != to_process - sctx->count) {
			rc = -EINVAL;
			goto out;
		}

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
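		/* an empty sg list here means nothing was mapped; refuse to
		 * hand the hardware a zero-length operation */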
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = sctx->count;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 &len, nx_ctx->ap->sglen);

	if (len != sctx->count) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

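/* the higher cra_priority lets the crypto API pick this driver over the
 * generic software xcbc(aes) implementation when the NX unit is present */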
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};