drivers/crypto/ccp/ccp-crypto-main.c (Linux-libre 5.4.47-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};

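/* Maximum number of cmds that may sit on the request queue; once this
 * limit is reached, only requests that allow backlogging are accepted.
 */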
#define CCP_CRYPTO_MAX_QLEN     100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;
};

struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

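/* A cmd was handed off successfully if the CCP accepted it (0) or is
 * processing or backlogging it (-EINPROGRESS or -EBUSY).
 */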
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

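/* Remove a completed cmd from the request queue and return the next
 * queued cmd that uses the same tfm (if any) so it can be submitted.
 * The next backlogged cmd, if one exists, is handed back via @backlog.
 */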
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cmd *held = NULL, *tmp;
        unsigned long flags;

        *backlog = NULL;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can be executed from any point in the cmd list
         *   special precautions have to be taken when handling the backlog.
         */
        if (req_queue.backlog != &req_queue.cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;

                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        req_queue.cmd_count--;
        list_del(&crypto_cmd->entry);

        spin_unlock_irqrestore(&req_queue_lock, flags);

        return held;
}

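/* Callback invoked by the CCP driver when a cmd finishes.  Propagates
 * the -EBUSY to -EINPROGRESS transition, runs the crypto API completion
 * for the finished request, and submits the next held cmd for the tfm.
 */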
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int ret;

        if (err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                return;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                /* Since we have already queued the cmd, we must indicate that
                 * we can backlog so as not to "lose" this request.
                 */
                held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                ctx = crypto_tfm_ctx(held->req->tfm);
                if (ctx->complete)
                        ret = ctx->complete(held->req, ret);
                held->req->complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);
}

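/* Add a cmd to the request queue.  The cmd is submitted to the CCP
 * immediately only if no other cmd for the same tfm is already queued;
 * otherwise it is held and submitted when the earlier cmd completes.
 */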
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cmd *active = NULL, *tmp;
        unsigned long flags;
        bool free_cmd = true;
        int ret;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Check if the cmd can/should be queued */
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
                        ret = -ENOSPC;
                        goto e_lock;
                }
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list then the current cmd cannot
         * be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_lock;    /* Error, don't queue it */
        }

        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (req_queue.backlog == &req_queue.cmds)
                        req_queue.backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        req_queue.cmd_count++;
        list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

        free_cmd = false;

e_lock:
        spin_unlock_irqrestore(&req_queue_lock, flags);

        if (free_cmd)
                kfree(crypto_cmd);

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced from the
         * crypto_async_request (req) pointer because it is used after
         * completion callback for the request and the req pointer
         * might not be valid anymore.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        return ccp_crypto_enqueue_cmd(crypto_cmd);
}

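/* Append the entries of @sg_add to the first unused slots of @table's
 * scatterlist and return the last entry that was filled in, or NULL if
 * the table does not have enough free entries to hold all of @sg_add.
 */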
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        if (WARN_ON(!sg))
                return NULL;

        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        if (WARN_ON(sg_add))
                return NULL;

        return sg_last;
}

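/* Register every algorithm family that has not been disabled via its
 * module parameter; a failure in any registration aborts the rest.
 */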
static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_aeads(&aead_algs);
                if (ret)
                        return ret;
        }

        if (!des3_disable) {
                ret = ccp_register_des3_algs(&cipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        if (!rsa_disable) {
                ret = ccp_register_rsa_algs(&akcipher_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

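/* Unregister and free every algorithm that was added to the hash,
 * cipher, AEAD and akcipher lists during registration.
 */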
static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
        struct ccp_crypto_aead *aead_alg, *aead_tmp;
        struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }

        list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
                crypto_unregister_aead(&aead_alg->alg);
                list_del(&aead_alg->entry);
                kfree(aead_alg);
        }

        list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
                crypto_unregister_akcipher(&akc_alg->alg);
                list_del(&akc_alg->entry);
                kfree(akc_alg);
        }
}

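/* Module init: bail out if no CCP device is present, set up the shared
 * request queue and register the supported algorithms.
 */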
static int ccp_crypto_init(void)
{
        int ret;

        ret = ccp_present();
        if (ret) {
                pr_err("Cannot load: there are no available CCPs\n");
                return ret;
        }

        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;

        ret = ccp_register_algs();
        if (ret)
                ccp_unregister_algs();

        return ret;
}

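/* Module exit: tear down all registered algorithms. */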
static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);