// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

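/*
 * A buffer_array is a staging area: the cc_add_*_entry() helpers below
 * append raw DMA buffers and SG lists to it, and cc_generate_mlli()
 * later renders everything accumulated into a single MLLI table.
 */
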
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = areq_ctx->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

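/*
 * Illustrative example: nbytes=100 over an SGL of two 60-byte entries
 * yields nents=2 and *lbytes=40, since only 40 bytes of the second
 * entry are consumed.
 */
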
/**
 * cc_zero_sgl() - Zero scatter list data.
 *
 * @sgl: SG list to zero
 * @data_len: number of bytes to zero, starting from the head of the list
 */
void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	int sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: device object
 * @dest: pointer to a linear buffer
 * @sg: SG list
 * @to_skip: offset into the SG list at which the copy starts
 * @end: offset into the SG list at which the copy ends
 * @direct: select copy direction (SG list to buffer or buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

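/*
 * Illustrative example, assuming the 64 KB CC_MAX_MLLI_ENTRY_SIZE limit
 * noted above: a 150 KB contiguous buffer renders as three LLI entries
 * of 64 KB, 64 KB and 22 KB, each entry being a two-word
 * (address, size) pair.
 */
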
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
						  offset, &total_nents,
						  &mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
						    tot_len, &total_nents,
						    &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

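/*
 * The whole MLLI table lives in a single contiguous allocation from the
 * DMA pool; each caller's mlli_nents counter only accumulates the
 * entries produced by its own chained buffers, which is what feeds the
 * descriptor length field mentioned above.
 */
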
static void cc_add_buffer_entry(struct device *dev,
				struct buffer_array *sgl_data,
				dma_addr_t buffer_dma, unsigned int buffer_len,
				bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
		index, &buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

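/*
 * Both cc_add_buffer_entry() and cc_add_sg_entry() only record the
 * entry and zero the caller's MLLI nents counter; no MLLI words are
 * written until cc_generate_mlli() walks the array.
 */
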
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else { /* !sg_is_last */
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		/* In case of mmu the number of mapped nents might
		 * be changed from the original sgl nents
		 */
		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
		if (*mapped_nents == 0) {
			*nents = 0;
			dev_err(dev, "dma_map_sg() sg buffer failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

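/*
 * Callers treat a single mapped entry as DLLI (direct addressing) and
 * fall back to MLLI whenever more than one entry was mapped; see e.g.
 * cc_map_cipher_request() below.
 */
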
static int cc_set_aead_conf_buf(struct device *dev,
				struct aead_req_ctx *areq_ctx,
				u8 *config_data, struct buffer_array *sg_data,
				unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

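/*
 * Note: in-place requests (src == dst) are mapped once with
 * DMA_BIDIRECTIONAL and only the src SGL is chained; a distinct dst SGL
 * gets its own mapping and its own MLLI entry.
 */
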
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overwrite caused by the cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

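/*
 * Example: with a 16-byte ICV, a 3-entry SGL whose last entry carries
 * only 4 data bytes cannot hold the ICV contiguously, so it is treated
 * as fragmented and handled via the backup MAC buffer instead.
 */
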
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	/* TODO: what about CTR mode? */
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}

static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int size_of_assoc = areq_ctx->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = areq_ctx->assoclen;
	struct scatterlist *sgl;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = areq_ctx->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
							curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

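/*
 * Resulting MLLI SRAM layout (double-pass, non-inplace decrypt shown):
 *
 *   mlli_sram_addr -> [ assoc MLLI ][ src MLLI ][ dst MLLI ]
 *
 * For non-inplace encrypt the src/dst tables swap places, and in-place
 * requests share a single data table.
 */
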
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overwrite caused by the cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for the cipher; remove the ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + areq_ctx->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* Mlli support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

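/*
 * Residue arithmetic example for cc_map_hash_request_update(): with
 * block_size=64, *curr_buff_cnt=10 and nbytes=100, total_in_len=110, so
 * *next_buff_cnt = 110 & 63 = 46 and update_data_len = 64; the trailing
 * 46 bytes are stashed in next_buff for the following update call.
 */
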
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!buff_mgr_handle->mlli_buffs_pool)
		goto error;

	return 0;

error:
	cc_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}
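
/*
 * Note on pool sizing: each pool buffer is big enough for a maximal
 * MLLI table (MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries of
 * LLI_ENTRY_BYTE_SIZE bytes), matching the overflow check in
 * cc_render_buff_to_mlli().
 */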