2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
66 #define QAT_AES_HW_CONFIG_ENC(alg) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
71 #define QAT_AES_HW_CONFIG_DEC(alg) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
76 static DEFINE_MUTEX(algs_lock);
77 static unsigned int active_devs;
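/*
 * Flat scatter-gather list in the layout consumed by the accelerator.
 * The whole structure is DMA-mapped and handed to firmware by bus
 * address, hence the packing and 64-byte alignment.
 */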
85 struct qat_alg_buf_list {
88 uint32_t num_mapped_bufs;
89 struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
92 /* Common content descriptor */
95 struct qat_enc { /* Encrypt content desc */
96 struct icp_qat_hw_cipher_algo_blk cipher;
97 struct icp_qat_hw_auth_algo_blk hash;
99 struct qat_dec { /* Decrypt content desc */
100 struct icp_qat_hw_auth_algo_blk hash;
101 struct icp_qat_hw_cipher_algo_blk cipher;
106 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
108 struct qat_auth_state {
109 uint8_t data[MAX_AUTH_STATE_SIZE + 64];
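/*
 * Per-tfm session state: DMA-coherent encrypt and decrypt content
 * descriptors, pre-built firmware request templates that are copied into
 * every request, the software shash used for the HMAC precomputes and a
 * random salt for IV generation.
 */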
112 struct qat_alg_session_ctx {
113 struct qat_alg_cd *enc_cd;
114 dma_addr_t enc_cd_paddr;
115 struct qat_alg_cd *dec_cd;
116 dma_addr_t dec_cd_paddr;
117 struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
118 struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
119 struct qat_crypto_instance *inst;
120 struct crypto_tfm *tfm;
121 struct crypto_shash *hash_tfm;
122 enum icp_qat_hw_auth_algo qat_hash_alg;
123 uint8_t salt[AES_BLOCK_SIZE];
124 spinlock_t lock; /* protects qat_alg_session_ctx struct */
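/*
 * Physical package id of the CPU submitting the request, used to pick a
 * crypto instance on the same node when per-tfm resources are allocated.
 */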
127 static int get_current_node(void)
129 return cpu_data(current_thread_info()->cpu).phys_proc_id;
132 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
134 switch (qat_hash_alg) {
135 case ICP_QAT_HW_AUTH_ALGO_SHA1:
136 return ICP_QAT_HW_SHA1_STATE1_SZ;
137 case ICP_QAT_HW_AUTH_ALGO_SHA256:
138 return ICP_QAT_HW_SHA256_STATE1_SZ;
139 case ICP_QAT_HW_AUTH_ALGO_SHA512:
140 return ICP_QAT_HW_SHA512_STATE1_SZ;
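/*
 * Precompute the inner and outer HMAC states for the hardware: the auth
 * key is padded (or hashed first if longer than the block size), XORed
 * with the standard HMAC ipad/opad bytes, and one block of each is run
 * through the software shash. The exported partial states are stored
 * big-endian in the content descriptor so the accelerator can resume the
 * HMAC per request.
 */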
147 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
148 struct qat_alg_session_ctx *ctx,
149 const uint8_t *auth_key,
150 unsigned int auth_keylen)
152 struct qat_auth_state auth_state;
153 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
154 struct sha1_state sha1;
155 struct sha256_state sha256;
156 struct sha512_state sha512;
157 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
158 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
159 uint8_t *ipad = auth_state.data;
160 uint8_t *opad = ipad + block_size;
161 __be32 *hash_state_out;
162 __be64 *hash512_state_out;
165 memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
166 shash->tfm = ctx->hash_tfm;
169 if (auth_keylen > block_size) {
170 char buff[SHA512_BLOCK_SIZE];
171 int ret = crypto_shash_digest(shash, auth_key,
176 memcpy(ipad, buff, digest_size);
177 memcpy(opad, buff, digest_size);
178 memset(ipad + digest_size, 0, block_size - digest_size);
179 memset(opad + digest_size, 0, block_size - digest_size);
181 memcpy(ipad, auth_key, auth_keylen);
182 memcpy(opad, auth_key, auth_keylen);
183 memset(ipad + auth_keylen, 0, block_size - auth_keylen);
184 memset(opad + auth_keylen, 0, block_size - auth_keylen);
187 for (i = 0; i < block_size; i++) {
188 char *ipad_ptr = ipad + i;
189 char *opad_ptr = opad + i;
194 if (crypto_shash_init(shash))
197 if (crypto_shash_update(shash, ipad, block_size))
200 hash_state_out = (__be32 *)hash->sha.state1;
201 hash512_state_out = (__be64 *)hash_state_out;
203 switch (ctx->qat_hash_alg) {
204 case ICP_QAT_HW_AUTH_ALGO_SHA1:
205 if (crypto_shash_export(shash, &sha1))
207 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
208 *hash_state_out = cpu_to_be32(*(sha1.state + i));
210 case ICP_QAT_HW_AUTH_ALGO_SHA256:
211 if (crypto_shash_export(shash, &sha256))
213 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
214 *hash_state_out = cpu_to_be32(*(sha256.state + i));
216 case ICP_QAT_HW_AUTH_ALGO_SHA512:
217 if (crypto_shash_export(shash, &sha512))
219 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
220 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
226 if (crypto_shash_init(shash))
229 if (crypto_shash_update(shash, opad, block_size))
232 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
233 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
234 hash512_state_out = (__be64 *)hash_state_out;
236 switch (ctx->qat_hash_alg) {
237 case ICP_QAT_HW_AUTH_ALGO_SHA1:
238 if (crypto_shash_export(shash, &sha1))
240 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
241 *hash_state_out = cpu_to_be32(*(sha1.state + i));
243 case ICP_QAT_HW_AUTH_ALGO_SHA256:
244 if (crypto_shash_export(shash, &sha256))
246 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
247 *hash_state_out = cpu_to_be32(*(sha256.state + i));
249 case ICP_QAT_HW_AUTH_ALGO_SHA512:
250 if (crypto_shash_export(shash, &sha512))
252 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
253 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
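/*
 * Fill the request-header fields shared by the encrypt and decrypt
 * templates: LA service, SGL pointers, digest carried in the buffer, no
 * partial processing, 16-byte IV field, no protocol offload and no state
 * update.
 */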
261 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
264 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
265 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
266 header->comn_req_flags =
267 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
268 QAT_COMN_PTR_TYPE_SGL);
269 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
270 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
271 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
272 ICP_QAT_FW_LA_PARTIAL_NONE);
273 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
274 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
275 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
276 ICP_QAT_FW_LA_NO_PROTO);
277 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
278 ICP_QAT_FW_LA_NO_UPDATE_STATE);
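/*
 * Build the encrypt session: a cipher-then-hash content descriptor
 * (AES-CBC config and key followed by the HMAC setup) plus the bulk
 * request template. The slice chain is CIPHER -> AUTH -> DRAM write,
 * with the computed digest returned in the buffer.
 */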
281 static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
282 int alg, struct crypto_authenc_keys *keys)
284 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
285 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
286 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
287 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
288 struct icp_qat_hw_auth_algo_blk *hash =
289 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
290 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
291 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
292 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
293 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
294 void *ptr = &req_tmpl->cd_ctrl;
295 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
296 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
299 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
300 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
301 hash->sha.inner_setup.auth_config.config =
302 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
303 ctx->qat_hash_alg, digestsize);
304 hash->sha.inner_setup.auth_counter.counter =
305 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
307 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
311 qat_alg_init_common_hdr(header);
312 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
313 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
314 ICP_QAT_FW_LA_RET_AUTH_RES);
315 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
316 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
317 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
318 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
320 /* Cipher CD config setup */
321 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
322 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
323 cipher_cd_ctrl->cipher_cfg_offset = 0;
324 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
325 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
326 /* Auth CD config setup */
327 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
328 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
329 hash_cd_ctrl->inner_res_sz = digestsize;
330 hash_cd_ctrl->final_sz = digestsize;
332 switch (ctx->qat_hash_alg) {
333 case ICP_QAT_HW_AUTH_ALGO_SHA1:
334 hash_cd_ctrl->inner_state1_sz =
335 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
336 hash_cd_ctrl->inner_state2_sz =
337 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
339 case ICP_QAT_HW_AUTH_ALGO_SHA256:
340 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
341 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
343 case ICP_QAT_HW_AUTH_ALGO_SHA512:
344 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
345 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
350 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
351 ((sizeof(struct icp_qat_hw_auth_setup) +
352 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
353 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
354 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
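/*
 * Build the decrypt session: a hash-then-cipher content descriptor (HMAC
 * setup first, AES-CBC config and key after it) with the slices chained
 * AUTH -> CIPHER, so the HMAC over the ciphertext is verified by the
 * hardware (CMP_AUTH_RES) as part of the same request.
 */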
358 static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
359 int alg, struct crypto_authenc_keys *keys)
361 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
362 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
363 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
364 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
365 struct icp_qat_hw_cipher_algo_blk *cipher =
366 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
367 sizeof(struct icp_qat_hw_auth_setup) +
368 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
369 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
370 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
371 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
372 void *ptr = &req_tmpl->cd_ctrl;
373 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
374 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
375 struct icp_qat_fw_la_auth_req_params *auth_param =
376 (struct icp_qat_fw_la_auth_req_params *)
377 ((char *)&req_tmpl->serv_specif_rqpars +
378 sizeof(struct icp_qat_fw_la_cipher_req_params));
381 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
382 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
383 hash->sha.inner_setup.auth_config.config =
384 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
387 hash->sha.inner_setup.auth_counter.counter =
388 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
390 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
394 qat_alg_init_common_hdr(header);
395 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
396 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
397 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
398 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
399 ICP_QAT_FW_LA_CMP_AUTH_RES);
400 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
401 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
403 /* Cipher CD config setup */
404 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
405 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
406 cipher_cd_ctrl->cipher_cfg_offset =
407 (sizeof(struct icp_qat_hw_auth_setup) +
408 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
409 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
410 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
412 /* Auth CD config setup */
413 hash_cd_ctrl->hash_cfg_offset = 0;
414 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
415 hash_cd_ctrl->inner_res_sz = digestsize;
416 hash_cd_ctrl->final_sz = digestsize;
418 switch (ctx->qat_hash_alg) {
419 case ICP_QAT_HW_AUTH_ALGO_SHA1:
420 hash_cd_ctrl->inner_state1_sz =
421 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
422 hash_cd_ctrl->inner_state2_sz =
423 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
425 case ICP_QAT_HW_AUTH_ALGO_SHA256:
426 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
427 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
429 case ICP_QAT_HW_AUTH_ALGO_SHA512:
430 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
431 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
437 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
438 ((sizeof(struct icp_qat_hw_auth_setup) +
439 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
440 auth_param->auth_res_sz = digestsize;
441 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
442 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
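/*
 * Split the authenc() key into its cipher and auth parts, pick the AES
 * key size, draw a random salt for IV generation and set up both the
 * encrypt and decrypt sessions; bad key lengths are reported via
 * CRYPTO_TFM_RES_BAD_KEY_LEN.
 */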
446 static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
447 const uint8_t *key, unsigned int keylen)
449 struct crypto_authenc_keys keys;
452 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
455 if (crypto_authenc_extractkeys(&keys, key, keylen))
458 switch (keys.enckeylen) {
459 case AES_KEYSIZE_128:
460 alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
462 case AES_KEYSIZE_192:
463 alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
465 case AES_KEYSIZE_256:
466 alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
473 if (qat_alg_init_enc_session(ctx, alg, &keys))
476 if (qat_alg_init_dec_session(ctx, alg, &keys))
481 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
487 static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
490 struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
493 spin_lock(&ctx->lock);
496 dev = &GET_DEV(ctx->inst->accel_dev);
497 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
498 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
499 memset(&ctx->enc_fw_req_tmpl, 0,
500 sizeof(struct icp_qat_fw_la_bulk_req));
501 memset(&ctx->dec_fw_req_tmpl, 0,
502 sizeof(struct icp_qat_fw_la_bulk_req));
505 int node = get_current_node();
506 struct qat_crypto_instance *inst =
507 qat_crypto_get_instance_node(node);
509 spin_unlock(&ctx->lock);
513 dev = &GET_DEV(inst->accel_dev);
515 ctx->enc_cd = dma_zalloc_coherent(dev,
516 sizeof(struct qat_alg_cd),
520 spin_unlock(&ctx->lock);
523 ctx->dec_cd = dma_zalloc_coherent(dev,
524 sizeof(struct qat_alg_cd),
528 spin_unlock(&ctx->lock);
532 spin_unlock(&ctx->lock);
533 if (qat_alg_init_sessions(ctx, key, keylen))
539 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
540 ctx->dec_cd, ctx->dec_cd_paddr);
543 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
544 ctx->enc_cd, ctx->enc_cd_paddr);
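/*
 * Undo the DMA mappings created by qat_alg_sgl_to_bufl() once a request
 * has completed (or failed to be submitted).
 */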
549 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
550 struct qat_crypto_request *qat_req)
552 struct device *dev = &GET_DEV(inst->accel_dev);
553 struct qat_alg_buf_list *bl = qat_req->buf.bl;
554 struct qat_alg_buf_list *blout = qat_req->buf.blout;
555 dma_addr_t blp = qat_req->buf.blp;
556 dma_addr_t blpout = qat_req->buf.bloutp;
557 size_t sz = qat_req->buf.sz;
558 int i, bufs = bl->num_bufs;
560 for (i = 0; i < bl->num_bufs; i++)
561 dma_unmap_single(dev, bl->bufers[i].addr,
562 bl->bufers[i].len, DMA_BIDIRECTIONAL);
564 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
567 /* For an out-of-place operation, DMA-unmap only the data buffers */
568 int bufless = bufs - blout->num_mapped_bufs;
570 for (i = bufless; i < bufs; i++) {
571 dma_unmap_single(dev, blout->bufers[i].addr,
572 blout->bufers[i].len,
575 dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
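/*
 * Flatten the assoc, IV and data scatterlists into a single
 * qat_alg_buf_list, DMA-map each element and the list itself, and record
 * the handles in the request. In-place operation reuses the same list
 * for source and destination; out-of-place operation builds a second
 * list that reuses the assoc/IV mappings and maps only the output data.
 */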
580 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
581 struct scatterlist *assoc,
582 struct scatterlist *sgl,
583 struct scatterlist *sglout, uint8_t *iv,
585 struct qat_crypto_request *qat_req)
587 struct device *dev = &GET_DEV(inst->accel_dev);
588 int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
589 struct qat_alg_buf_list *bufl;
590 struct qat_alg_buf_list *buflout = NULL;
592 dma_addr_t bloutp = 0;
593 struct scatterlist *sg;
594 size_t sz = sizeof(struct qat_alg_buf_list) +
595 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
600 bufl = kmalloc_node(sz, GFP_ATOMIC,
601 dev_to_node(&GET_DEV(inst->accel_dev)));
605 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
606 if (unlikely(dma_mapping_error(dev, blp)))
609 for_each_sg(assoc, sg, assoc_n, i) {
612 bufl->bufers[bufs].addr = dma_map_single(dev,
616 bufl->bufers[bufs].len = sg->length;
617 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
621 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
623 bufl->bufers[bufs].len = ivlen;
624 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
628 for_each_sg(sgl, sg, n, i) {
631 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
634 bufl->bufers[y].len = sg->length;
635 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
638 bufl->num_bufs = n + bufs;
639 qat_req->buf.bl = bufl;
640 qat_req->buf.blp = blp;
641 qat_req->buf.sz = sz;
642 /* Handle out of place operation */
644 struct qat_alg_buf *bufers;
646 buflout = kmalloc_node(sz, GFP_ATOMIC,
647 dev_to_node(&GET_DEV(inst->accel_dev)));
648 if (unlikely(!buflout))
650 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
651 if (unlikely(dma_mapping_error(dev, bloutp)))
653 bufers = buflout->bufers;
654 /* For an out-of-place operation, DMA-map only the data and
655 * reuse the assoc and iv mappings from the source list */
656 for (i = 0; i < bufs; i++) {
657 bufers[i].len = bufl->bufers[i].len;
658 bufers[i].addr = bufl->bufers[i].addr;
660 for_each_sg(sglout, sg, n, i) {
663 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
666 buflout->bufers[y].len = sg->length;
667 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
670 buflout->num_bufs = n + bufs;
671 buflout->num_mapped_bufs = n;
672 qat_req->buf.blout = buflout;
673 qat_req->buf.bloutp = bloutp;
675 /* Otherwise set the src and dst to the same address */
676 qat_req->buf.bloutp = qat_req->buf.blp;
680 dev_err(dev, "Failed to map buf for dma\n");
681 for_each_sg(sgl, sg, n + bufs, i) {
682 if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
683 dma_unmap_single(dev, bufl->bufers[i].addr,
688 if (!dma_mapping_error(dev, blp))
689 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
691 if (sgl != sglout && buflout) {
692 for_each_sg(sglout, sg, n, i) {
695 if (!dma_mapping_error(dev, buflout->bufers[y].addr))
696 dma_unmap_single(dev, buflout->bufers[y].addr,
697 buflout->bufers[y].len,
700 if (!dma_mapping_error(dev, bloutp))
701 dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
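/*
 * Response handler called from the transport layer for every firmware
 * completion: unmap the request buffers, check the firmware status word
 * and complete the AEAD request, reporting an error if the status is not
 * OK.
 */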
707 void qat_alg_callback(void *resp)
709 struct icp_qat_fw_la_resp *qat_resp = resp;
710 struct qat_crypto_request *qat_req =
711 (void *)(__force long)qat_resp->opaque_data;
712 struct qat_alg_session_ctx *ctx = qat_req->ctx;
713 struct qat_crypto_instance *inst = ctx->inst;
714 struct aead_request *areq = qat_req->areq;
715 uint8_t stat_field = qat_resp->comn_resp.comn_status;
716 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
718 qat_alg_free_bufl(inst, qat_req);
719 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
721 areq->base.complete(&areq->base, res);
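/*
 * AEAD decrypt: the cipher region excludes the trailing digest and
 * starts after the associated data and IV, while the auth region covers
 * assoc data, IV and ciphertext so the hardware can verify the HMAC.
 * Submission is retried a few times if the ring is busy.
 */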
724 static int qat_alg_dec(struct aead_request *areq)
726 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
727 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
728 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
729 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
730 struct icp_qat_fw_la_cipher_req_params *cipher_param;
731 struct icp_qat_fw_la_auth_req_params *auth_param;
732 struct icp_qat_fw_la_bulk_req *msg;
733 int digest_size = crypto_aead_crt(aead_tfm)->authsize;
736 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
737 areq->iv, AES_BLOCK_SIZE, qat_req);
742 *msg = ctx->dec_fw_req_tmpl;
744 qat_req->areq = areq;
745 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
746 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
747 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
748 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
749 cipher_param->cipher_length = areq->cryptlen - digest_size;
750 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
751 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
752 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
753 auth_param->auth_off = 0;
754 auth_param->auth_len = areq->assoclen +
755 cipher_param->cipher_length + AES_BLOCK_SIZE;
757 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
758 } while (ret == -EAGAIN && ctr++ < 10);
760 if (ret == -EAGAIN) {
761 qat_alg_free_bufl(ctx->inst, qat_req);
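/*
 * Common AEAD encrypt path: the last argument selects whether the IV
 * buffer itself is encrypted along with the payload (the givencrypt
 * path) or simply copied into the request as the CBC IV. The auth
 * region always spans assoc data, IV and the ciphered payload.
 */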
767 static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
770 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
771 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
772 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
773 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
774 struct icp_qat_fw_la_cipher_req_params *cipher_param;
775 struct icp_qat_fw_la_auth_req_params *auth_param;
776 struct icp_qat_fw_la_bulk_req *msg;
779 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
780 iv, AES_BLOCK_SIZE, qat_req);
785 *msg = ctx->enc_fw_req_tmpl;
787 qat_req->areq = areq;
788 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
789 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
790 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
791 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
792 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
795 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
796 cipher_param->cipher_offset = areq->assoclen;
798 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
799 cipher_param->cipher_length = areq->cryptlen;
800 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
802 auth_param->auth_off = 0;
803 auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
806 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
807 } while (ret == -EAGAIN && ctr++ < 10);
809 if (ret == -EAGAIN) {
810 qat_alg_free_bufl(ctx->inst, qat_req);
816 static int qat_alg_enc(struct aead_request *areq)
818 return qat_alg_enc_internal(areq, areq->iv, 0);
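/*
 * Given-IV encrypt: construct the IV from the per-session random salt
 * and the request sequence number, then encrypt it together with the
 * payload.
 */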
821 static int qat_alg_genivenc(struct aead_givcrypt_request *req)
823 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
824 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
825 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
828 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
829 seq = cpu_to_be64(req->seq);
830 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
831 &seq, sizeof(uint64_t));
832 return qat_alg_enc_internal(&req->areq, req->giv, 1);
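/*
 * Common tfm init: allocate the software shash used for the HMAC
 * precomputes and reserve the per-request context. The DMA-coherent
 * content descriptors are allocated later, from setkey(), once a device
 * instance has been chosen.
 */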
835 static int qat_alg_init(struct crypto_tfm *tfm,
836 enum icp_qat_hw_auth_algo hash, const char *hash_name)
838 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
840 memset(ctx, '\0', sizeof(*ctx));
841 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
842 if (IS_ERR(ctx->hash_tfm))
844 spin_lock_init(&ctx->lock);
845 ctx->qat_hash_alg = hash;
846 tfm->crt_aead.reqsize = sizeof(struct aead_request) +
847 sizeof(struct qat_crypto_request);
852 static int qat_alg_sha1_init(struct crypto_tfm *tfm)
854 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
857 static int qat_alg_sha256_init(struct crypto_tfm *tfm)
859 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
862 static int qat_alg_sha512_init(struct crypto_tfm *tfm)
864 return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
867 static void qat_alg_exit(struct crypto_tfm *tfm)
869 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
870 struct qat_crypto_instance *inst = ctx->inst;
873 if (!IS_ERR(ctx->hash_tfm))
874 crypto_free_shash(ctx->hash_tfm);
879 dev = &GET_DEV(inst->accel_dev);
881 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
882 ctx->enc_cd, ctx->enc_cd_paddr);
884 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
885 ctx->dec_cd, ctx->dec_cd_paddr);
886 qat_crypto_put_instance(inst);
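/*
 * AEAD algorithms exposed through the crypto API: AES-CBC paired with
 * HMAC-SHA1/SHA256/SHA512, registered with a high cra_priority so they
 * are preferred over the software implementations.
 */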
889 static struct crypto_alg qat_algs[] = { {
890 .cra_name = "authenc(hmac(sha1),cbc(aes))",
891 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
892 .cra_priority = 4001,
893 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
894 .cra_blocksize = AES_BLOCK_SIZE,
895 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
897 .cra_type = &crypto_aead_type,
898 .cra_module = THIS_MODULE,
899 .cra_init = qat_alg_sha1_init,
900 .cra_exit = qat_alg_exit,
903 .setkey = qat_alg_setkey,
904 .decrypt = qat_alg_dec,
905 .encrypt = qat_alg_enc,
906 .givencrypt = qat_alg_genivenc,
907 .ivsize = AES_BLOCK_SIZE,
908 .maxauthsize = SHA1_DIGEST_SIZE,
912 .cra_name = "authenc(hmac(sha256),cbc(aes))",
913 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
914 .cra_priority = 4001,
915 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
916 .cra_blocksize = AES_BLOCK_SIZE,
917 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
919 .cra_type = &crypto_aead_type,
920 .cra_module = THIS_MODULE,
921 .cra_init = qat_alg_sha256_init,
922 .cra_exit = qat_alg_exit,
925 .setkey = qat_alg_setkey,
926 .decrypt = qat_alg_dec,
927 .encrypt = qat_alg_enc,
928 .givencrypt = qat_alg_genivenc,
929 .ivsize = AES_BLOCK_SIZE,
930 .maxauthsize = SHA256_DIGEST_SIZE,
934 .cra_name = "authenc(hmac(sha512),cbc(aes))",
935 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
936 .cra_priority = 4001,
937 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
938 .cra_blocksize = AES_BLOCK_SIZE,
939 .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
941 .cra_type = &crypto_aead_type,
942 .cra_module = THIS_MODULE,
943 .cra_init = qat_alg_sha512_init,
944 .cra_exit = qat_alg_exit,
947 .setkey = qat_alg_setkey,
948 .decrypt = qat_alg_dec,
949 .encrypt = qat_alg_enc,
950 .givencrypt = qat_alg_genivenc,
951 .ivsize = AES_BLOCK_SIZE,
952 .maxauthsize = SHA512_DIGEST_SIZE,
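/*
 * Algorithm registration is reference counted by the number of active
 * accelerator devices: register on the first device, unregister when the
 * last one goes away, all under algs_lock.
 */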
957 int qat_algs_register(void)
961 mutex_lock(&algs_lock);
962 if (++active_devs == 1) {
965 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
966 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
968 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
970 mutex_unlock(&algs_lock);
974 int qat_algs_unregister(void)
978 mutex_lock(&algs_lock);
979 if (--active_devs == 0)
980 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
981 mutex_unlock(&algs_lock);
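/*
 * Module-level init/exit: hold a reference to the default RNG, which
 * supplies the per-session salt in qat_alg_init_sessions().
 */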
985 int qat_algs_init(void)
987 crypto_get_default_rng();
991 void qat_algs_exit(void)
993 crypto_put_default_rng();