// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


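/*
 * Per-tfm setup: run the common NX SHA context init, then select the
 * SHA-512 property set and record the digest size in the coprocessor
 * parameter block (CPB).
 */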
static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        int err;

        err = nx_crypto_ctx_sha_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_SHA);

        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

        return 0;
}

static int nx_sha512_init(struct shash_desc *desc)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof *sctx);

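        /*
         * Seed the standard SHA-512 initial hash values, stored
         * big-endian since the state words are later copied verbatim
         * into the coprocessor parameter block.
         */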
        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
        sctx->state[2] = __cpu_to_be64(SHA512_H2);
        sctx->state[3] = __cpu_to_be64(SHA512_H3);
        sctx->state[4] = __cpu_to_be64(SHA512_H4);
        sctx->state[5] = __cpu_to_be64(SHA512_H5);
        sctx->state[6] = __cpu_to_be64(SHA512_H6);
        sctx->state[7] = __cpu_to_be64(SHA512_H7);
        sctx->count[0] = 0;

        return 0;
}

static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
        u32 max_sg_len;
        u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* 2 cases for total data len:
         *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
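        /* e.g. with 40 bytes already buffered and len = 300: total = 340,
         * so 256 bytes (two 128-byte blocks) go to the chip below and the
         * remaining 84 are buffered for a later call (assuming the sg
         * limits below don't bind).
         */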
        total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
        if (total < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + buf_len, data, len);
                sctx->count[0] += len;
                goto out;
        }

        memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
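        /* Flag this pass as a continuation (resume from the partial
         * digest staged above) and as intermediate (more data follows,
         * so the chip must not apply the final SHA-512 padding).
         */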
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

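        /* Clamp the sg list length to what the preallocated sg array can
         * hold and to what the device can consume in a single operation.
         */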
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        data_len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, max_sg_len);
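        /* The (head - tail) difference is deliberately negative: for PFO
         * operations a negative length tells the hypervisor that the
         * output address refers to a scatter/gather list rather than a
         * flat buffer.
         */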
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (data_len != SHA512_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        do {
                int used_sgs = 0;
                struct nx_sg *in_sg = nx_ctx->in_sg;

                if (buf_len) {
                        data_len = buf_len;
                        in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);

                        if (data_len != buf_len) {
                                rc = -EINVAL;
                                goto out;
                        }
                        used_sgs = in_sg - nx_ctx->in_sg;
                }

                /* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
                 * processed in this iteration. The value is restricted by
                 * the sg list limits and by the number of sgs we already
                 * used for leftover data (see above). Ideally we could
                 * allow NX_PAGE_SIZE * max_sg_len, but because the data
                 * may not be page-aligned we need to account for that
                 * too. */
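                /* e.g. with max_sg_len = 64 and one sg entry already used
                 * for buffered data, at most (64 - 1 - 1) = 62 pages of
                 * new input are mapped this pass, then rounded down to a
                 * whole number of 128-byte blocks (illustrative numbers).
                 */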
                to_process = min_t(u64, total,
                        (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
                to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);

                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

                if (data_len != (to_process - buf_len)) {
                        rc = -EINVAL;
                        goto out;
                }

                to_process = data_len + buf_len;
                leftover = total - to_process;

                /*
                 * we've hit the nx chip previously and we're updating
                 * again, so copy over the partial digest.
                 */
                memcpy(csbcpb->cpb.sha512.input_partial_digest,
                               csbcpb->cpb.sha512.message_digest,
                               SHA512_DIGEST_SIZE);

                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->sha512_ops));

                total -= to_process;
                data += to_process - buf_len;
                buf_len = 0;

        } while (leftover >= SHA512_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        if (leftover)
                memcpy(sctx->buf, data, leftover);
        sctx->count[0] += len;
        memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        u32 max_sg_len;
        u64 count0;
        unsigned long irq_flags;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* final is represented by continuing the operation and indicating
         * that this is not an intermediate operation */
        if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
                                                        SHA512_DIGEST_SIZE);
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        }

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

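        /* The CPB carries the total message length in bits; this driver
         * tracks only the low 64 bits (count[0]). With NX_FDM_INTERMEDIATE
         * cleared, the chip applies the final SHA-512 padding itself, so
         * only the residual partial block from sctx->buf is fed in below.
         */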
        count0 = sctx->count[0] * 8;

        csbcpb->cpb.sha512.message_bit_length_lo = count0;

        len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
                                 max_sg_len);

        if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
                rc = -EINVAL;
                goto out;
        }

        len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                 max_sg_len);

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->sha512_ops));
        atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

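/* The whole hash state lives in struct sha512_state inside the shash
 * descriptor, so export and import reduce to raw copies.
 */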
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

struct shash_alg nx_shash_sha512_alg = {
        .digestsize = SHA512_DIGEST_SIZE,
        .init       = nx_sha512_init,
        .update     = nx_sha512_update,
        .final      = nx_sha512_final,
        .export     = nx_sha512_export,
        .import     = nx_sha512_import,
        .descsize   = sizeof(struct sha512_state),
        .statesize  = sizeof(struct sha512_state),
        .base       = {
                .cra_name        = "sha512",
                .cra_driver_name = "sha512-nx",
                .cra_priority    = 300,
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};
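
/*
 * Registration note: this alg is registered by the driver core (nx.c)
 * when the NX device probes. A minimal sketch of what that looks like
 * through the generic shash API (illustrative only, not the driver's
 * actual probe path):
 *
 *      rc = crypto_register_shash(&nx_shash_sha512_alg);
 *      if (rc)
 *              return rc;
 *      ...
 *      crypto_unregister_shash(&nx_shash_sha512_alg);
 */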