Linux-libre 3.18.37-gnu
librecmc/linux-libre.git: drivers/crypto/qat/qat_common/qat_algs.c
1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
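/*
 * Build the hardware cipher-config word for AES in CBC mode: the encrypt
 * config uses the key as supplied (NO_CONVERT), while the decrypt config
 * requests a key conversion for the decrypt direction (KEY_CONVERT).
 */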
66 #define QAT_AES_HW_CONFIG_ENC(alg) \
67         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68                         ICP_QAT_HW_CIPHER_NO_CONVERT, \
69                         ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_DEC(alg) \
72         ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73                         ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74                         ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static DEFINE_MUTEX(algs_lock);
77 static unsigned int active_devs;
78
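/*
 * Flat buffer descriptors handed to the firmware: each qat_alg_buf holds
 * the DMA address and length of one segment, and qat_alg_buf_list is the
 * 64-byte-aligned table referenced by a request's src/dest pointers.
 */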
79 struct qat_alg_buf {
80         uint32_t len;
81         uint32_t resrvd;
82         uint64_t addr;
83 } __packed;
84
85 struct qat_alg_buf_list {
86         uint64_t resrvd;
87         uint32_t num_bufs;
88         uint32_t num_mapped_bufs;
89         struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
91
92 /* Common content descriptor */
93 struct qat_alg_cd {
94         union {
95                 struct qat_enc { /* Encrypt content desc */
96                         struct icp_qat_hw_cipher_algo_blk cipher;
97                         struct icp_qat_hw_auth_algo_blk hash;
98                 } qat_enc_cd;
99         struct qat_dec { /* Decrypt content desc */
100                         struct icp_qat_hw_auth_algo_blk hash;
101                         struct icp_qat_hw_cipher_algo_blk cipher;
102                 } qat_dec_cd;
103         };
104 } __aligned(64);
105
106 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
107
108 struct qat_auth_state {
109         uint8_t data[MAX_AUTH_STATE_SIZE + 64];
110 } __aligned(64);
111
112 struct qat_alg_session_ctx {
113         struct qat_alg_cd *enc_cd;
114         dma_addr_t enc_cd_paddr;
115         struct qat_alg_cd *dec_cd;
116         dma_addr_t dec_cd_paddr;
117         struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
118         struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
119         struct qat_crypto_instance *inst;
120         struct crypto_tfm *tfm;
121         struct crypto_shash *hash_tfm;
122         enum icp_qat_hw_auth_algo qat_hash_alg;
123         uint8_t salt[AES_BLOCK_SIZE];
124         spinlock_t lock;        /* protects qat_alg_session_ctx struct */
125 };
126
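/*
 * Physical package id of the CPU we are running on; qat_alg_setkey() uses
 * it to pick a crypto instance on the same node.
 */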
127 static int get_current_node(void)
128 {
129         return cpu_data(current_thread_info()->cpu).phys_proc_id;
130 }
131
132 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
133 {
134         switch (qat_hash_alg) {
135         case ICP_QAT_HW_AUTH_ALGO_SHA1:
136                 return ICP_QAT_HW_SHA1_STATE1_SZ;
137         case ICP_QAT_HW_AUTH_ALGO_SHA256:
138                 return ICP_QAT_HW_SHA256_STATE1_SZ;
139         case ICP_QAT_HW_AUTH_ALGO_SHA512:
140                 return ICP_QAT_HW_SHA512_STATE1_SZ;
141         default:
142                 return -EFAULT;
143         }
144         return -EFAULT;
145 }
146
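/*
 * Precompute the HMAC inner and outer partial digests for the session:
 * hash one block of key XOR ipad and one block of key XOR opad, export the
 * intermediate states and store them big-endian in the hardware auth
 * block's state1 area, with the outer state at an 8-byte-aligned offset
 * after the inner one.
 */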
147 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
148                                   struct qat_alg_session_ctx *ctx,
149                                   const uint8_t *auth_key,
150                                   unsigned int auth_keylen)
151 {
152         struct qat_auth_state auth_state;
153         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
154         struct sha1_state sha1;
155         struct sha256_state sha256;
156         struct sha512_state sha512;
157         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
158         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
159         uint8_t *ipad = auth_state.data;
160         uint8_t *opad = ipad + block_size;
161         __be32 *hash_state_out;
162         __be64 *hash512_state_out;
163         int i, offset;
164
165         memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
166         shash->tfm = ctx->hash_tfm;
167         shash->flags = 0x0;
168
169         if (auth_keylen > block_size) {
170                 char buff[SHA512_BLOCK_SIZE];
171                 int ret = crypto_shash_digest(shash, auth_key,
172                                               auth_keylen, buff);
173                 if (ret)
174                         return ret;
175
176                 memcpy(ipad, buff, digest_size);
177                 memcpy(opad, buff, digest_size);
178                 memset(ipad + digest_size, 0, block_size - digest_size);
179                 memset(opad + digest_size, 0, block_size - digest_size);
180         } else {
181                 memcpy(ipad, auth_key, auth_keylen);
182                 memcpy(opad, auth_key, auth_keylen);
183                 memset(ipad + auth_keylen, 0, block_size - auth_keylen);
184                 memset(opad + auth_keylen, 0, block_size - auth_keylen);
185         }
186
187         for (i = 0; i < block_size; i++) {
188                 char *ipad_ptr = ipad + i;
189                 char *opad_ptr = opad + i;
190                 *ipad_ptr ^= 0x36;
191                 *opad_ptr ^= 0x5C;
192         }
193
194         if (crypto_shash_init(shash))
195                 return -EFAULT;
196
197         if (crypto_shash_update(shash, ipad, block_size))
198                 return -EFAULT;
199
200         hash_state_out = (__be32 *)hash->sha.state1;
201         hash512_state_out = (__be64 *)hash_state_out;
202
203         switch (ctx->qat_hash_alg) {
204         case ICP_QAT_HW_AUTH_ALGO_SHA1:
205                 if (crypto_shash_export(shash, &sha1))
206                         return -EFAULT;
207                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
209                 break;
210         case ICP_QAT_HW_AUTH_ALGO_SHA256:
211                 if (crypto_shash_export(shash, &sha256))
212                         return -EFAULT;
213                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
214                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
215                 break;
216         case ICP_QAT_HW_AUTH_ALGO_SHA512:
217                 if (crypto_shash_export(shash, &sha512))
218                         return -EFAULT;
219                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
220                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
221                 break;
222         default:
223                 return -EFAULT;
224         }
225
226         if (crypto_shash_init(shash))
227                 return -EFAULT;
228
229         if (crypto_shash_update(shash, opad, block_size))
230                 return -EFAULT;
231
232         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
233         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
234         hash512_state_out = (__be64 *)hash_state_out;
235
236         switch (ctx->qat_hash_alg) {
237         case ICP_QAT_HW_AUTH_ALGO_SHA1:
238                 if (crypto_shash_export(shash, &sha1))
239                         return -EFAULT;
240                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
241                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
242                 break;
243         case ICP_QAT_HW_AUTH_ALGO_SHA256:
244                 if (crypto_shash_export(shash, &sha256))
245                         return -EFAULT;
246                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
247                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
248                 break;
249         case ICP_QAT_HW_AUTH_ALGO_SHA512:
250                 if (crypto_shash_export(shash, &sha512))
251                         return -EFAULT;
252                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
253                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
254                 break;
255         default:
256                 return -EFAULT;
257         }
258         return 0;
259 }
260
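/*
 * Fill in the request-header fields shared by the encrypt and decrypt
 * templates: LA service, SGL pointers, 64-bit content-descriptor address,
 * digest returned in the buffer, no partial processing, 16-byte IV field,
 * no protocol offload and no state update.
 */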
261 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
262 {
263         header->hdr_flags =
264                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
265         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
266         header->comn_req_flags =
267                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
268                                             QAT_COMN_PTR_TYPE_SGL);
269         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
270                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
271         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
272                                   ICP_QAT_FW_LA_PARTIAL_NONE);
273         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
274                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
275         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
276                                 ICP_QAT_FW_LA_NO_PROTO);
277         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
278                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
279 }
280
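/*
 * Build the encrypt session: a cipher-then-hash content descriptor (the
 * AES-CBC config and key first, followed by the HMAC setup and precomputed
 * states) plus the matching firmware request template.
 */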
281 static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
282                                     int alg, struct crypto_authenc_keys *keys)
283 {
284         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
285         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
286         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
287         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
288         struct icp_qat_hw_auth_algo_blk *hash =
289                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
290                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
291         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
292         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
293         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
294         void *ptr = &req_tmpl->cd_ctrl;
295         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
296         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
297
298         /* CD setup */
299         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
300         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
301         hash->sha.inner_setup.auth_config.config =
302                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
303                                              ctx->qat_hash_alg, digestsize);
304         hash->sha.inner_setup.auth_counter.counter =
305                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
306
307         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
308                 return -EFAULT;
309
310         /* Request setup */
311         qat_alg_init_common_hdr(header);
312         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
313         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
314                                    ICP_QAT_FW_LA_RET_AUTH_RES);
315         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
316                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
317         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
318         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
319
320         /* Cipher CD config setup */
321         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
322         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
323         cipher_cd_ctrl->cipher_cfg_offset = 0;
324         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
325         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
326         /* Auth CD config setup */
327         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
328         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
329         hash_cd_ctrl->inner_res_sz = digestsize;
330         hash_cd_ctrl->final_sz = digestsize;
331
332         switch (ctx->qat_hash_alg) {
333         case ICP_QAT_HW_AUTH_ALGO_SHA1:
334                 hash_cd_ctrl->inner_state1_sz =
335                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
336                 hash_cd_ctrl->inner_state2_sz =
337                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
338                 break;
339         case ICP_QAT_HW_AUTH_ALGO_SHA256:
340                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
341                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
342                 break;
343         case ICP_QAT_HW_AUTH_ALGO_SHA512:
344                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
345                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
346                 break;
347         default:
348                 break;
349         }
350         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
351                         ((sizeof(struct icp_qat_hw_auth_setup) +
352                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
353         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
354         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
355         return 0;
356 }
357
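/*
 * Build the decrypt session: a hash-then-cipher content descriptor (HMAC
 * setup and precomputed states first, then the AES-CBC config and key) and
 * a request template that has the hardware verify the digest.
 */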
358 static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
359                                     int alg, struct crypto_authenc_keys *keys)
360 {
361         struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
362         unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
363         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
364         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
365         struct icp_qat_hw_cipher_algo_blk *cipher =
366                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
367                 sizeof(struct icp_qat_hw_auth_setup) +
368                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
369         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
370         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
371         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
372         void *ptr = &req_tmpl->cd_ctrl;
373         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
374         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
375         struct icp_qat_fw_la_auth_req_params *auth_param =
376                 (struct icp_qat_fw_la_auth_req_params *)
377                 ((char *)&req_tmpl->serv_specif_rqpars +
378                 sizeof(struct icp_qat_fw_la_cipher_req_params));
379
380         /* CD setup */
381         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
382         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
383         hash->sha.inner_setup.auth_config.config =
384                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
385                                              ctx->qat_hash_alg,
386                                              digestsize);
387         hash->sha.inner_setup.auth_counter.counter =
388                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
389
390         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
391                 return -EFAULT;
392
393         /* Request setup */
394         qat_alg_init_common_hdr(header);
395         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
396         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
397                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
398         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
399                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
400         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
401         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
402
403         /* Cipher CD config setup */
404         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
405         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
406         cipher_cd_ctrl->cipher_cfg_offset =
407                 (sizeof(struct icp_qat_hw_auth_setup) +
408                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
409         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
410         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
411
412         /* Auth CD config setup */
413         hash_cd_ctrl->hash_cfg_offset = 0;
414         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
415         hash_cd_ctrl->inner_res_sz = digestsize;
416         hash_cd_ctrl->final_sz = digestsize;
417
418         switch (ctx->qat_hash_alg) {
419         case ICP_QAT_HW_AUTH_ALGO_SHA1:
420                 hash_cd_ctrl->inner_state1_sz =
421                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
422                 hash_cd_ctrl->inner_state2_sz =
423                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
424                 break;
425         case ICP_QAT_HW_AUTH_ALGO_SHA256:
426                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
427                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
428                 break;
429         case ICP_QAT_HW_AUTH_ALGO_SHA512:
430                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
431                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
432                 break;
433         default:
434                 break;
435         }
436
437         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
438                         ((sizeof(struct icp_qat_hw_auth_setup) +
439                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
440         auth_param->auth_res_sz = digestsize;
441         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
442         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
443         return 0;
444 }
445
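/*
 * Split an authenc() key into its cipher and auth parts, select the AES
 * variant from the cipher key length, and build both the encrypt and
 * decrypt session templates. Also draws a random salt for the geniv path.
 */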
446 static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
447                                  const uint8_t *key, unsigned int keylen)
448 {
449         struct crypto_authenc_keys keys;
450         int alg;
451
452         if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
453                 return -EFAULT;
454
455         if (crypto_authenc_extractkeys(&keys, key, keylen))
456                 goto bad_key;
457
458         switch (keys.enckeylen) {
459         case AES_KEYSIZE_128:
460                 alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
461                 break;
462         case AES_KEYSIZE_192:
463                 alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
464                 break;
465         case AES_KEYSIZE_256:
466                 alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
467                 break;
468         default:
469                 goto bad_key;
470                 break;
471         }
472
473         if (qat_alg_init_enc_session(ctx, alg, &keys))
474                 goto error;
475
476         if (qat_alg_init_dec_session(ctx, alg, &keys))
477                 goto error;
478
479         return 0;
480 bad_key:
481         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
482         return -EINVAL;
483 error:
484         return -EFAULT;
485 }
486
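/*
 * setkey: on first use, grab a crypto instance on the local node and
 * allocate DMA-coherent content descriptors; on rekey, just clear the
 * existing descriptors and templates. Then (re)build both sessions.
 */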
487 static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
488                           unsigned int keylen)
489 {
490         struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
491         struct device *dev;
492
493         spin_lock(&ctx->lock);
494         if (ctx->enc_cd) {
495                 /* rekeying */
496                 dev = &GET_DEV(ctx->inst->accel_dev);
497                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
498                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
499                 memset(&ctx->enc_fw_req_tmpl, 0,
500                        sizeof(struct icp_qat_fw_la_bulk_req));
501                 memset(&ctx->dec_fw_req_tmpl, 0,
502                        sizeof(struct icp_qat_fw_la_bulk_req));
503         } else {
504                 /* new key */
505                 int node = get_current_node();
506                 struct qat_crypto_instance *inst =
507                                 qat_crypto_get_instance_node(node);
508                 if (!inst) {
509                         spin_unlock(&ctx->lock);
510                         return -EINVAL;
511                 }
512
513                 dev = &GET_DEV(inst->accel_dev);
514                 ctx->inst = inst;
515                 ctx->enc_cd = dma_zalloc_coherent(dev,
516                                                   sizeof(struct qat_alg_cd),
517                                                   &ctx->enc_cd_paddr,
518                                                   GFP_ATOMIC);
519                 if (!ctx->enc_cd) {
520                         spin_unlock(&ctx->lock);
521                         return -ENOMEM;
522                 }
523                 ctx->dec_cd = dma_zalloc_coherent(dev,
524                                                   sizeof(struct qat_alg_cd),
525                                                   &ctx->dec_cd_paddr,
526                                                   GFP_ATOMIC);
527                 if (!ctx->dec_cd) {
528                         spin_unlock(&ctx->lock);
529                         goto out_free_enc;
530                 }
531         }
532         spin_unlock(&ctx->lock);
533         if (qat_alg_init_sessions(ctx, key, keylen))
534                 goto out_free_all;
535
536         return 0;
537
538 out_free_all:
539         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
540                           ctx->dec_cd, ctx->dec_cd_paddr);
541         ctx->dec_cd = NULL;
542 out_free_enc:
543         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
544                           ctx->enc_cd, ctx->enc_cd_paddr);
545         ctx->enc_cd = NULL;
546         return -ENOMEM;
547 }
548
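/*
 * Undo the DMA mappings set up by qat_alg_sgl_to_bufl() and free the
 * buffer lists. For an out-of-place request only the data segments of the
 * output list were mapped separately, so only those are unmapped here.
 */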
549 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
550                               struct qat_crypto_request *qat_req)
551 {
552         struct device *dev = &GET_DEV(inst->accel_dev);
553         struct qat_alg_buf_list *bl = qat_req->buf.bl;
554         struct qat_alg_buf_list *blout = qat_req->buf.blout;
555         dma_addr_t blp = qat_req->buf.blp;
556         dma_addr_t blpout = qat_req->buf.bloutp;
557         size_t sz = qat_req->buf.sz;
558         int i, bufs = bl->num_bufs;
559
560         for (i = 0; i < bl->num_bufs; i++)
561                 dma_unmap_single(dev, bl->bufers[i].addr,
562                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
563
564         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
565         kfree(bl);
566         if (blp != blpout) {
567                 /* For an out-of-place operation, unmap only the data buffers */
568                 int bufless = bufs - blout->num_mapped_bufs;
569
570                 for (i = bufless; i < bufs; i++) {
571                         dma_unmap_single(dev, blout->bufers[i].addr,
572                                          blout->bufers[i].len,
573                                          DMA_BIDIRECTIONAL);
574                 }
575                 dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
576                 kfree(blout);
577         }
578 }
579
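/*
 * DMA-map the assoc data, IV and src/dst scatterlists and describe them in
 * firmware buffer lists. In-place requests share one list for src and dst;
 * out-of-place requests get a second list that reuses the assoc and IV
 * mappings and maps only the output data segments.
 */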
580 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
581                                struct scatterlist *assoc,
582                                struct scatterlist *sgl,
583                                struct scatterlist *sglout, uint8_t *iv,
584                                uint8_t ivlen,
585                                struct qat_crypto_request *qat_req)
586 {
587         struct device *dev = &GET_DEV(inst->accel_dev);
588         int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
589         struct qat_alg_buf_list *bufl;
590         struct qat_alg_buf_list *buflout = NULL;
591         dma_addr_t blp;
592         dma_addr_t bloutp = 0;
593         struct scatterlist *sg;
594         size_t sz = sizeof(struct qat_alg_buf_list) +
595                         ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
596
597         if (unlikely(!n))
598                 return -EINVAL;
599
600         bufl = kmalloc_node(sz, GFP_ATOMIC,
601                             dev_to_node(&GET_DEV(inst->accel_dev)));
602         if (unlikely(!bufl))
603                 return -ENOMEM;
604
605         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
606         if (unlikely(dma_mapping_error(dev, blp)))
607                 goto err;
608
609         for_each_sg(assoc, sg, assoc_n, i) {
610                 if (!sg->length)
611                         continue;
612                 bufl->bufers[bufs].addr = dma_map_single(dev,
613                                                          sg_virt(sg),
614                                                          sg->length,
615                                                          DMA_BIDIRECTIONAL);
616                 bufl->bufers[bufs].len = sg->length;
617                 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
618                         goto err;
619                 bufs++;
620         }
621         bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
622                                                  DMA_BIDIRECTIONAL);
623         bufl->bufers[bufs].len = ivlen;
624         if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
625                 goto err;
626         bufs++;
627
628         for_each_sg(sgl, sg, n, i) {
629                 int y = i + bufs;
630
631                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
632                                                       sg->length,
633                                                       DMA_BIDIRECTIONAL);
634                 bufl->bufers[y].len = sg->length;
635                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
636                         goto err;
637         }
638         bufl->num_bufs = n + bufs;
639         qat_req->buf.bl = bufl;
640         qat_req->buf.blp = blp;
641         qat_req->buf.sz = sz;
642         /* Handle out of place operation */
643         if (sgl != sglout) {
644                 struct qat_alg_buf *bufers;
645
646                 buflout = kmalloc_node(sz, GFP_ATOMIC,
647                                        dev_to_node(&GET_DEV(inst->accel_dev)));
648                 if (unlikely(!buflout))
649                         goto err;
650                 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
651                 if (unlikely(dma_mapping_error(dev, bloutp)))
652                         goto err;
653                 bufers = buflout->bufers;
654                 /* For an out-of-place operation, map only the data
655                  * segments and reuse the assoc and iv mappings */
656                 for (i = 0; i < bufs; i++) {
657                         bufers[i].len = bufl->bufers[i].len;
658                         bufers[i].addr = bufl->bufers[i].addr;
659                 }
660                 for_each_sg(sglout, sg, n, i) {
661                         int y = i + bufs;
662
663                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
664                                                         sg->length,
665                                                         DMA_BIDIRECTIONAL);
666                         buflout->bufers[y].len = sg->length;
667                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
668                                 goto err;
669                 }
670                 buflout->num_bufs = n + bufs;
671                 buflout->num_mapped_bufs = n;
672                 qat_req->buf.blout = buflout;
673                 qat_req->buf.bloutp = bloutp;
674         } else {
675                 /* Otherwise set the src and dst to the same address */
676                 qat_req->buf.bloutp = qat_req->buf.blp;
677         }
678         return 0;
679 err:
680         dev_err(dev, "Failed to map buf for dma\n");
681         for_each_sg(sgl, sg, n + bufs, i) {
682                 if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
683                         dma_unmap_single(dev, bufl->bufers[i].addr,
684                                          bufl->bufers[i].len,
685                                          DMA_BIDIRECTIONAL);
686                 }
687         }
688         if (!dma_mapping_error(dev, blp))
689                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
690         kfree(bufl);
691         if (sgl != sglout && buflout) {
692                 for_each_sg(sglout, sg, n, i) {
693                         int y = i + bufs;
694
695                         if (!dma_mapping_error(dev, buflout->bufers[y].addr))
696                                 dma_unmap_single(dev, buflout->bufers[y].addr,
697                                                  buflout->bufers[y].len,
698                                                  DMA_BIDIRECTIONAL);
699                 }
700                 if (!dma_mapping_error(dev, bloutp))
701                         dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
702                 kfree(buflout);
703         }
704         return -ENOMEM;
705 }
706
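/*
 * Response-ring callback: recover the request from the opaque field,
 * release its DMA mappings and complete the aead request, returning
 * -EBADMSG if the accelerator reported a crypto error.
 */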
707 void qat_alg_callback(void *resp)
708 {
709         struct icp_qat_fw_la_resp *qat_resp = resp;
710         struct qat_crypto_request *qat_req =
711                                 (void *)(__force long)qat_resp->opaque_data;
712         struct qat_alg_session_ctx *ctx = qat_req->ctx;
713         struct qat_crypto_instance *inst = ctx->inst;
714         struct aead_request *areq = qat_req->areq;
715         uint8_t stat_field = qat_resp->comn_resp.comn_status;
716         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
717
718         qat_alg_free_bufl(inst, qat_req);
719         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
720                 res = -EBADMSG;
721         areq->base.complete(&areq->base, res);
722 }
723
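/*
 * Decrypt path: map the buffers, copy the decrypt template, set the cipher
 * and auth lengths/offsets (the auth region covers assoc data, IV and
 * ciphertext minus the digest) and post the request, retrying a busy ring
 * a few times before giving up with -EBUSY.
 */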
724 static int qat_alg_dec(struct aead_request *areq)
725 {
726         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
727         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
728         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
729         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
730         struct icp_qat_fw_la_cipher_req_params *cipher_param;
731         struct icp_qat_fw_la_auth_req_params *auth_param;
732         struct icp_qat_fw_la_bulk_req *msg;
733         int digest_size = crypto_aead_crt(aead_tfm)->authsize;
734         int ret, ctr = 0;
735
736         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
737                                   areq->iv, AES_BLOCK_SIZE, qat_req);
738         if (unlikely(ret))
739                 return ret;
740
741         msg = &qat_req->req;
742         *msg = ctx->dec_fw_req_tmpl;
743         qat_req->ctx = ctx;
744         qat_req->areq = areq;
745         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
746         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
747         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
748         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
749         cipher_param->cipher_length = areq->cryptlen - digest_size;
750         cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
751         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
752         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
753         auth_param->auth_off = 0;
754         auth_param->auth_len = areq->assoclen +
755                                 cipher_param->cipher_length + AES_BLOCK_SIZE;
756         do {
757                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
758         } while (ret == -EAGAIN && ctr++ < 10);
759
760         if (ret == -EAGAIN) {
761                 qat_alg_free_bufl(ctx->inst, qat_req);
762                 return -EBUSY;
763         }
764         return -EINPROGRESS;
765 }
766
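/*
 * Common encrypt path. When enc_iv is set (geniv), the IV block itself is
 * ciphered along with the plaintext; otherwise the IV is carried in the
 * request parameters and only the payload after assoc data and IV is
 * ciphered.
 */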
767 static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
768                                 int enc_iv)
769 {
770         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
771         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
772         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
773         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
774         struct icp_qat_fw_la_cipher_req_params *cipher_param;
775         struct icp_qat_fw_la_auth_req_params *auth_param;
776         struct icp_qat_fw_la_bulk_req *msg;
777         int ret, ctr = 0;
778
779         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
780                                   iv, AES_BLOCK_SIZE, qat_req);
781         if (unlikely(ret))
782                 return ret;
783
784         msg = &qat_req->req;
785         *msg = ctx->enc_fw_req_tmpl;
786         qat_req->ctx = ctx;
787         qat_req->areq = areq;
788         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
789         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
790         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
791         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
792         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
793
794         if (enc_iv) {
795                 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
796                 cipher_param->cipher_offset = areq->assoclen;
797         } else {
798                 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
799                 cipher_param->cipher_length = areq->cryptlen;
800                 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
801         }
802         auth_param->auth_off = 0;
803         auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
804
805         do {
806                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
807         } while (ret == -EAGAIN && ctr++ < 10);
808
809         if (ret == -EAGAIN) {
810                 qat_alg_free_bufl(ctx->inst, qat_req);
811                 return -EBUSY;
812         }
813         return -EINPROGRESS;
814 }
815
816 static int qat_alg_enc(struct aead_request *areq)
817 {
818         return qat_alg_enc_internal(areq, areq->iv, 0);
819 }
820
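/*
 * givencrypt: build the IV from the per-context salt and the request
 * sequence number, then run the encrypt path with IV encryption enabled.
 */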
821 static int qat_alg_genivenc(struct aead_givcrypt_request *req)
822 {
823         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
824         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
825         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
826         __be64 seq;
827
828         memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
829         seq = cpu_to_be64(req->seq);
830         memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
831                &seq, sizeof(uint64_t));
832         return qat_alg_enc_internal(&req->areq, req->giv, 1);
833 }
834
835 static int qat_alg_init(struct crypto_tfm *tfm,
836                         enum icp_qat_hw_auth_algo hash, const char *hash_name)
837 {
838         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
839
840         memset(ctx, '\0', sizeof(*ctx));
841         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
842         if (IS_ERR(ctx->hash_tfm))
843                 return -EFAULT;
844         spin_lock_init(&ctx->lock);
845         ctx->qat_hash_alg = hash;
846         tfm->crt_aead.reqsize = sizeof(struct aead_request) +
847                                 sizeof(struct qat_crypto_request);
848         ctx->tfm = tfm;
849         return 0;
850 }
851
852 static int qat_alg_sha1_init(struct crypto_tfm *tfm)
853 {
854         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
855 }
856
857 static int qat_alg_sha256_init(struct crypto_tfm *tfm)
858 {
859         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
860 }
861
862 static int qat_alg_sha512_init(struct crypto_tfm *tfm)
863 {
864         return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
865 }
866
867 static void qat_alg_exit(struct crypto_tfm *tfm)
868 {
869         struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
870         struct qat_crypto_instance *inst = ctx->inst;
871         struct device *dev;
872
873         if (!IS_ERR(ctx->hash_tfm))
874                 crypto_free_shash(ctx->hash_tfm);
875
876         if (!inst)
877                 return;
878
879         dev = &GET_DEV(inst->accel_dev);
880         if (ctx->enc_cd)
881                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
882                                   ctx->enc_cd, ctx->enc_cd_paddr);
883         if (ctx->dec_cd)
884                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
885                                   ctx->dec_cd, ctx->dec_cd_paddr);
886         qat_crypto_put_instance(inst);
887 }
888
889 static struct crypto_alg qat_algs[] = { {
890         .cra_name = "authenc(hmac(sha1),cbc(aes))",
891         .cra_driver_name = "qat_aes_cbc_hmac_sha1",
892         .cra_priority = 4001,
893         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
894         .cra_blocksize = AES_BLOCK_SIZE,
895         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
896         .cra_alignmask = 0,
897         .cra_type = &crypto_aead_type,
898         .cra_module = THIS_MODULE,
899         .cra_init = qat_alg_sha1_init,
900         .cra_exit = qat_alg_exit,
901         .cra_u = {
902                 .aead = {
903                         .setkey = qat_alg_setkey,
904                         .decrypt = qat_alg_dec,
905                         .encrypt = qat_alg_enc,
906                         .givencrypt = qat_alg_genivenc,
907                         .ivsize = AES_BLOCK_SIZE,
908                         .maxauthsize = SHA1_DIGEST_SIZE,
909                 },
910         },
911 }, {
912         .cra_name = "authenc(hmac(sha256),cbc(aes))",
913         .cra_driver_name = "qat_aes_cbc_hmac_sha256",
914         .cra_priority = 4001,
915         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
916         .cra_blocksize = AES_BLOCK_SIZE,
917         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
918         .cra_alignmask = 0,
919         .cra_type = &crypto_aead_type,
920         .cra_module = THIS_MODULE,
921         .cra_init = qat_alg_sha256_init,
922         .cra_exit = qat_alg_exit,
923         .cra_u = {
924                 .aead = {
925                         .setkey = qat_alg_setkey,
926                         .decrypt = qat_alg_dec,
927                         .encrypt = qat_alg_enc,
928                         .givencrypt = qat_alg_genivenc,
929                         .ivsize = AES_BLOCK_SIZE,
930                         .maxauthsize = SHA256_DIGEST_SIZE,
931                 },
932         },
933 }, {
934         .cra_name = "authenc(hmac(sha512),cbc(aes))",
935         .cra_driver_name = "qat_aes_cbc_hmac_sha512",
936         .cra_priority = 4001,
937         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
938         .cra_blocksize = AES_BLOCK_SIZE,
939         .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
940         .cra_alignmask = 0,
941         .cra_type = &crypto_aead_type,
942         .cra_module = THIS_MODULE,
943         .cra_init = qat_alg_sha512_init,
944         .cra_exit = qat_alg_exit,
945         .cra_u = {
946                 .aead = {
947                         .setkey = qat_alg_setkey,
948                         .decrypt = qat_alg_dec,
949                         .encrypt = qat_alg_enc,
950                         .givencrypt = qat_alg_genivenc,
951                         .ivsize = AES_BLOCK_SIZE,
952                         .maxauthsize = SHA512_DIGEST_SIZE,
953                 },
954         },
955 } };
956
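/*
 * Registration is reference counted across accel devices: the algorithms
 * are registered when the first device comes up and unregistered when the
 * last one goes away.
 */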
957 int qat_algs_register(void)
958 {
959         int ret = 0;
960
961         mutex_lock(&algs_lock);
962         if (++active_devs == 1) {
963                 int i;
964
965                 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
966                         qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
967                                                 CRYPTO_ALG_ASYNC;
968                 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
969         }
970         mutex_unlock(&algs_lock);
971         return ret;
972 }
973
974 int qat_algs_unregister(void)
975 {
976         int ret = 0;
977
978         mutex_lock(&algs_lock);
979         if (--active_devs == 0)
980                 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
981         mutex_unlock(&algs_lock);
982         return ret;
983 }
984
985 int qat_algs_init(void)
986 {
987         crypto_get_default_rng();
988         return 0;
989 }
990
991 void qat_algs_exit(void)
992 {
993         crypto_put_default_rng();
994 }