2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
49 #include "adf_accel_devices.h"
50 #include "adf_common_drv.h"
51 #include "icp_qat_hal.h"
52 #include "icp_qat_uclo.h"
/* Sentinel returned by qat_hal_get_reg_addr() for an unknown register type. */
54 #define BAD_REGADDR 0xffff
/* Generic poll budget used by the wait/alive helpers below. */
55 #define MAX_RETRY_TIMES 10000
/* Power-on defaults written to each AE when it is taken out of reset. */
56 #define INIT_CTX_ARB_VALUE 0x0
57 #define INIT_CTX_ENABLE_VALUE 0x0
58 #define INIT_PC_VALUE 0x0
59 #define INIT_WAKEUP_EVENTS_VALUE 0x1
60 #define INIT_SIG_EVENTS_VALUE 0x1
61 #define INIT_CCENABLE_VALUE 0x2000
/* Bit positions of the slice (QAT) and AE fields inside ICP_RESET. */
62 #define RST_CSR_QAT_LSB 20
63 #define RST_CSR_AE_LSB 0
/* MISC_CONTROL bit that gates the free-running timestamp counters. */
64 #define MC_TIMESTAMP_ENABLE (0x1 << 7)
/* Mask that clears the write-1-to-clear bits of CTX_ENABLES so a
 * read-modify-write does not accidentally acknowledge them. */
66 #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
67 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
68 (~(1 << CE_REG_PAR_ERR_BITPOS)))
/* Patch the 8-bit immediate of a GPR-A immed microword in place.
 * NOTE(review): 'inst' is deliberately evaluated as an lvalue. */
69 #define INSERT_IMMED_GPRA_CONST(inst, const_val) \
70 (inst = ((inst & 0xFFFF00C03FFull) | \
71 ((((const_val) << 12) & 0x0FF00000ull) | \
72 (((const_val) << 10) & 0x0003FC00ull))))
/* Same as above for the GPR-B bank (immediate lands in bits 0-7). */
73 #define INSERT_IMMED_GPRB_CONST(inst, const_val) \
74 (inst = ((inst & 0xFFFF00FFF00ull) | \
75 ((((const_val) << 12) & 0x0FF00000ull) | \
76 (((const_val) << 0) & 0x000000FFull))))
/* Shorthand for the per-AE bookkeeping slot inside the HAL handle. */
78 #define AE(handle, ae) handle->hal_handle->aes[ae]
/* Microcode stub used when batch-initialising local memory 4 bytes at a
 * time (see qat_hal_concat_micro_code / qat_hal_get_ins_num).
 * NOTE(review): this chunk appears to be missing the array's final
 * element(s) and closing brace — confirm against the full file. */
80 static const uint64_t inst_4b[] = {
81 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
82 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
/* Microcode program written into each AE by qat_hal_clear_gpr() to zero
 * every GPR, transfer and next-neighbour register; the long runs of
 * 0x0A04..C0000/0x0B04..C0000 words are per-register clear instructions.
 * NOTE(review): the closing "};" of this array is not visible in this
 * chunk — confirm against the full file. */
86 static const uint64_t inst[] = {
87 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
88 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
89 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
90 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
91 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
92 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
93 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
94 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
95 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
96 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
97 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
98 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
99 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
100 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
101 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
102 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
103 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
104 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
105 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
106 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
107 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
108 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
111 void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
112 unsigned char ae, unsigned int ctx_mask)
114 AE(handle, ae).live_ctx_mask = ctx_mask;
117 #define CSR_RETRY_TIMES 500
118 static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
119 unsigned char ae, unsigned int csr,
122 unsigned int iterations = CSR_RETRY_TIMES;
125 *value = GET_AE_CSR(handle, ae, csr);
126 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
128 } while (iterations--);
130 pr_err("QAT: Read CSR timeout\n");
134 static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
135 unsigned char ae, unsigned int csr,
138 unsigned int iterations = CSR_RETRY_TIMES;
141 SET_AE_CSR(handle, ae, csr, value);
142 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
144 } while (iterations--);
146 pr_err("QAT: Write CSR Timeout\n");
150 static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
151 unsigned char ae, unsigned char ctx,
152 unsigned int *events)
154 unsigned int cur_ctx;
156 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
157 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
158 qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
159 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
162 static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
163 unsigned char ae, unsigned int cycles,
166 unsigned int base_cnt = 0, cur_cnt = 0;
167 unsigned int csr = (1 << ACS_ABO_BITPOS);
168 int times = MAX_RETRY_TIMES;
169 int elapsed_cycles = 0;
171 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
173 while ((int)cycles > elapsed_cycles && times--) {
175 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
177 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
179 elapsed_cycles = cur_cnt - base_cnt;
181 if (elapsed_cycles < 0)
182 elapsed_cycles += 0x10000;
184 /* ensure at least 8 time cycles elapsed in wait_cycles */
185 if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
189 pr_err("QAT: wait_num_cycles time out\n");
/* Return @wrd with @bit cleared / set. Arguments and the whole
 * expansion are fully parenthesized so expressions like
 * CLR_BIT(x, a + b) & y expand correctly (classic macro hygiene). */
#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
#define SET_BIT(wrd, bit) ((wrd) | (1 << (bit)))
198 int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
199 unsigned char ae, unsigned char mode)
201 unsigned int csr, new_csr;
203 if ((mode != 4) && (mode != 8)) {
204 pr_err("QAT: bad ctx mode=%d\n", mode);
208 /* Sets the accelaration engine context mode to either four or eight */
209 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
210 csr = IGNORE_W1C_MASK & csr;
211 new_csr = (mode == 4) ?
212 SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
213 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
214 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
218 int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
219 unsigned char ae, unsigned char mode)
221 unsigned int csr, new_csr;
223 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
224 csr &= IGNORE_W1C_MASK;
227 SET_BIT(csr, CE_NN_MODE_BITPOS) :
228 CLR_BIT(csr, CE_NN_MODE_BITPOS);
231 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
236 int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
237 unsigned char ae, enum icp_qat_uof_regtype lm_type,
240 unsigned int csr, new_csr;
242 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
243 csr &= IGNORE_W1C_MASK;
247 SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
248 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
252 SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
253 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
256 pr_err("QAT: lmType = 0x%x\n", lm_type);
261 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
265 static unsigned short qat_hal_get_reg_addr(unsigned int type,
266 unsigned short reg_num)
268 unsigned short reg_addr;
273 reg_addr = 0x80 | (reg_num & 0x7f);
277 reg_addr = reg_num & 0x1f;
282 reg_addr = 0x180 | (reg_num & 0x1f);
285 reg_addr = 0x140 | ((reg_num & 0x3) << 1);
290 reg_addr = 0x1c0 | (reg_num & 0x1f);
293 reg_addr = 0x100 | ((reg_num & 0x3) << 1);
296 reg_addr = 0x280 | (reg_num & 0x1f);
305 reg_addr = 0x300 | (reg_num & 0xff);
308 reg_addr = BAD_REGADDR;
314 void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
316 unsigned int ae_reset_csr;
318 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
319 ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
320 ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
321 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
324 static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
325 unsigned char ae, unsigned int ctx_mask,
326 unsigned int ae_csr, unsigned int csr_val)
328 unsigned int ctx, cur_ctx;
330 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
332 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
333 if (!(ctx_mask & (1 << ctx)))
335 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
336 qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
339 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
342 static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
343 unsigned char ae, unsigned char ctx,
344 unsigned int ae_csr, unsigned int *csr_val)
346 unsigned int cur_ctx;
348 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
349 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
350 qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
351 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
354 static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
355 unsigned char ae, unsigned int ctx_mask,
358 unsigned int ctx, cur_ctx;
360 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
361 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
362 if (!(ctx_mask & (1 << ctx)))
364 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
365 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
367 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
370 static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
371 unsigned char ae, unsigned int ctx_mask,
374 unsigned int ctx, cur_ctx;
376 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
377 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
378 if (!(ctx_mask & (1 << ctx)))
380 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
381 qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
384 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
387 static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
389 unsigned int base_cnt, cur_cnt;
391 unsigned int times = MAX_RETRY_TIMES;
393 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
394 if (!(handle->hal_handle->ae_mask & (1 << ae)))
397 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
398 (unsigned int *)&base_cnt);
402 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
403 (unsigned int *)&cur_cnt);
405 } while (times-- && (cur_cnt == base_cnt));
408 pr_err("QAT: AE%d is inactive!!\n", ae);
416 static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
418 unsigned int misc_ctl;
421 /* stop the timestamp timers */
422 misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
423 if (misc_ctl & MC_TIMESTAMP_ENABLE)
424 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
425 (~MC_TIMESTAMP_ENABLE));
427 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
428 if (!(handle->hal_handle->ae_mask & (1 << ae)))
430 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
431 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
433 /* start timestamp timers */
434 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
437 #define ESRAM_AUTO_TINIT (1<<2)
438 #define ESRAM_AUTO_TINIT_DONE (1<<3)
439 #define ESRAM_AUTO_INIT_USED_CYCLES (1640)
440 #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
441 static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
443 void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
444 ESRAM_AUTO_INIT_CSR_OFFSET;
445 unsigned int csr_val, times = 30;
447 csr_val = ADF_CSR_RD(csr_addr, 0);
448 if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
451 csr_val = ADF_CSR_RD(csr_addr, 0);
452 csr_val |= ESRAM_AUTO_TINIT;
453 ADF_CSR_WR(csr_addr, 0, csr_val);
456 qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
457 csr_val = ADF_CSR_RD(csr_addr, 0);
458 } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
460 pr_err("QAT: Fail to init eSram!\n");
466 #define SHRAM_INIT_CYCLES 2060
/*
 * Take all enabled AEs and slices out of reset, re-enable their clocks,
 * verify the AEs are alive, then program sane power-on defaults into
 * every AE (contexts disabled, PC/arb/CC reset, wakeup+sig events set),
 * initialise eSRAM, wait for shared RAM init and reset the timestamps.
 * NOTE(review): this chunk has lines elided (braces, 'continue', goto
 * label and returns) — code below is kept byte-identical.
 */
467 int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
469 unsigned int ae_reset_csr;
471 unsigned int clk_csr;
472 unsigned int times = 100;
475 /* write to the reset csr */
476 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
477 ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
478 ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
480 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
/* poll until the hardware reports all reset bits deasserted */
483 csr = GET_GLB_CSR(handle, ICP_RESET);
484 } while ((handle->hal_handle->ae_mask |
485 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
/* re-enable the AE and slice clocks */
487 clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
488 clk_csr |= handle->hal_handle->ae_mask << 0;
489 clk_csr |= handle->hal_handle->slice_mask << 20;
490 SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
491 if (qat_hal_check_ae_alive(handle))
494 /* Set undefined power-up/reset states to reasonable default values */
495 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
496 if (!(handle->hal_handle->ae_mask & (1 << ae)))
498 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
499 INIT_CTX_ENABLE_VALUE);
500 qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
502 handle->hal_handle->upc_mask &
504 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
505 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
506 qat_hal_put_wakeup_event(handle, ae,
507 ICP_QAT_UCLO_AE_ALL_CTX,
508 INIT_WAKEUP_EVENTS_VALUE);
509 qat_hal_put_sig_event(handle, ae,
510 ICP_QAT_UCLO_AE_ALL_CTX,
511 INIT_SIG_EVENTS_VALUE);
513 if (qat_hal_init_esram(handle))
515 if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
517 qat_hal_reset_timestamp(handle);
/* failure path: any step above branches here */
521 pr_err("QAT: failed to get device out of reset\n");
525 static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
526 unsigned char ae, unsigned int ctx_mask)
530 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
531 ctx &= IGNORE_W1C_MASK &
532 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
533 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
/*
 * Compute the parity (0 or 1) of a 64-bit word by XOR-folding.
 * NOTE(review): the body was elided in the extracted chunk; this is the
 * standard fold used by qat_hal_set_uword_ecc() below — confirm against
 * the full file.
 */
static uint64_t qat_hal_parity_64bit(uint64_t word)
{
	word ^= word >> 1;
	word ^= word >> 2;
	word ^= word >> 4;
	word ^= word >> 8;
	word ^= word >> 16;
	word ^= word >> 32;
	return word & 1;
}

/*
 * Recompute the 7 ECC bits (bits 0x2C-0x32) of a 44-bit microword:
 * clear the old ECC field, then set each ECC bit to the parity of the
 * word under its dedicated mask. All masks cover only bits 0-43, so
 * later parities are not affected by ECC bits already written.
 */
static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
{
	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
		bit6_mask = 0xdaf69a46910ULL;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
	return uword;
}
566 void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
567 unsigned char ae, unsigned int uaddr,
568 unsigned int words_num, uint64_t *uword)
570 unsigned int ustore_addr;
573 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
575 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
576 for (i = 0; i < words_num; i++) {
577 unsigned int uwrd_lo, uwrd_hi;
580 tmp = qat_hal_set_uword_ecc(uword[i]);
581 uwrd_lo = (unsigned int)(tmp & 0xffffffff);
582 uwrd_hi = (unsigned int)(tmp >> 0x20);
583 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
584 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
586 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
589 static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
590 unsigned char ae, unsigned int ctx_mask)
594 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
595 ctx &= IGNORE_W1C_MASK;
596 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
597 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
598 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
/*
 * Zero every GPR/transfer/NN register of all enabled AEs: pre-clear the
 * read transfer registers, load the 'inst' clear program into each AE's
 * micro-store, run it on all contexts, wait for completion, then restore
 * the power-on defaults (contexts disabled, PC/arb/CC/events reset).
 * NOTE(review): lines are elided in this chunk (braces, 'continue',
 * arguments split across dropped lines, error return) — code below is
 * kept byte-identical.
 */
601 static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
604 unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
605 int times = MAX_RETRY_TIMES;
606 unsigned int csr_val = 0;
608 unsigned int savctx = 0;
611 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
612 if (!(handle->hal_handle->ae_mask & (1 << ae)))
614 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
615 qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
617 qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
/* share the control store and switch the AE into NN mode for the run */
620 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
621 csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
622 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
623 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
624 csr_val &= IGNORE_W1C_MASK;
625 csr_val |= CE_NN_MODE;
626 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
627 qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
629 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
630 handle->hal_handle->upc_mask &
632 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
633 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
634 qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
635 qat_hal_wr_indr_csr(handle, ae, ctx_mask,
636 CTX_SIG_EVENTS_INDIRECT, 0);
637 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
638 qat_hal_enable_ctx(handle, ae, ctx_mask);
640 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
641 if (!(handle->hal_handle->ae_mask & (1 << ae)))
643 /* wait for AE to finish */
645 ret = qat_hal_wait_cycles(handle, ae, 20, 1);
646 } while (ret && times--);
648 pr_err("QAT: clear GPR of AE %d failed", ae);
/* restore power-on defaults for this AE */
652 qat_hal_disable_ctx(handle, ae, ctx_mask);
653 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
655 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
656 INIT_CTX_ENABLE_VALUE);
657 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
658 handle->hal_handle->upc_mask &
660 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
661 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
662 qat_hal_put_wakeup_event(handle, ae, ctx_mask,
663 INIT_WAKEUP_EVENTS_VALUE);
664 qat_hal_put_sig_event(handle, ae, ctx_mask,
665 INIT_SIG_EVENTS_VALUE);
/* DH895xCC BAR-relative offsets of the capability, AE-transfer and
 * endpoint CSR windows mapped into the loader handle below. */
670 #define ICP_DH895XCC_AE_OFFSET 0x20000
671 #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
672 #define LOCAL_TO_XFER_REG_OFFSET 0x800
673 #define ICP_DH895XCC_EP_OFFSET 0x3a000
674 #define ICP_DH895XCC_PMISC_BAR 1
/*
 * Allocate and populate the firmware-loader handle: map the CSR
 * windows, describe every enabled AE, bring the AEs out of reset,
 * clear their GPRs and enable the ALU_OUT signature CSR.
 * NOTE(review): the index below uses ADF_DH895XCC_PMISC_BAR while
 * ICP_DH895XCC_PMISC_BAR is defined just above and never used —
 * looks inconsistent; confirm which constant is intended.
 * NOTE(review): lines are elided in this chunk (braces, error gotos,
 * 'max_en_ae_id = ae' update, final return) — code below is kept
 * byte-identical.
 */
675 int qat_hal_init(struct adf_accel_dev *accel_dev)
678 unsigned int max_en_ae_id = 0;
679 struct icp_qat_fw_loader_handle *handle;
680 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
681 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
682 struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
684 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
688 handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
689 ICP_DH895XCC_CAP_OFFSET;
690 handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
691 ICP_DH895XCC_AE_OFFSET;
692 handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
693 handle->hal_cap_ae_local_csr_addr_v =
694 handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
696 handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
697 if (!handle->hal_handle)
699 handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
700 handle->hal_handle->ae_mask = hw_data->ae_mask;
701 handle->hal_handle->slice_mask = hw_data->accel_mask;
702 /* create AE objects */
703 handle->hal_handle->upc_mask = 0x1ffff;
704 handle->hal_handle->max_ustore = 0x4000;
705 for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
706 if (!(hw_data->ae_mask & (1 << ae)))
708 handle->hal_handle->aes[ae].free_addr = 0;
709 handle->hal_handle->aes[ae].free_size =
710 handle->hal_handle->max_ustore;
711 handle->hal_handle->aes[ae].ustore_size =
712 handle->hal_handle->max_ustore;
713 handle->hal_handle->aes[ae].live_ctx_mask =
714 ICP_QAT_UCLO_AE_ALL_CTX;
717 handle->hal_handle->ae_max_num = max_en_ae_id + 1;
718 /* take all AEs out of reset */
719 if (qat_hal_clr_reset(handle)) {
720 pr_err("QAT: qat_hal_clr_reset error\n");
723 if (qat_hal_clear_gpr(handle))
725 /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
726 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
727 unsigned int csr_val = 0;
729 if (!(hw_data->ae_mask & (1 << ae)))
731 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
733 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
735 accel_dev->fw_loader->fw_loader = handle;
/* error path: free the partially built handle */
739 kfree(handle->hal_handle);
745 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
749 kfree(handle->hal_handle);
753 void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
754 unsigned int ctx_mask)
756 qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
757 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
758 qat_hal_enable_ctx(handle, ae, ctx_mask);
/* Stop the contexts in @ctx_mask by clearing their enable bits. */
void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
		  unsigned int ctx_mask)
{
	qat_hal_disable_ctx(handle, ae, ctx_mask);
}
767 void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
768 unsigned char ae, unsigned int ctx_mask, unsigned int upc)
770 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
771 handle->hal_handle->upc_mask & upc);
774 static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
775 unsigned char ae, unsigned int uaddr,
776 unsigned int words_num, uint64_t *uword)
778 unsigned int i, uwrd_lo, uwrd_hi;
779 unsigned int ustore_addr, misc_control;
781 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
782 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
783 misc_control & 0xfffffffb);
784 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
786 for (i = 0; i < words_num; i++) {
787 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
789 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
790 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
792 uword[i] = (uword[i] << 0x20) | uwrd_lo;
794 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
795 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
798 void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
799 unsigned char ae, unsigned int uaddr,
800 unsigned int words_num, unsigned int *data)
802 unsigned int i, ustore_addr;
804 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
806 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
807 for (i = 0; i < words_num; i++) {
808 unsigned int uwrd_lo, uwrd_hi, tmp;
810 uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
811 ((data[i] & 0xff00) << 2) |
812 (0x3 << 8) | (data[i] & 0xff);
813 uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
814 uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
815 tmp = ((data[i] >> 0x10) & 0xffff);
816 uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
817 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
818 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
820 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
823 #define MAX_EXEC_INST 100
/*
 * Execute an ad-hoc microcode sequence on one context: snapshot the
 * context's indirect CSRs, PC, CC, arbiter and signal state (and up to
 * MAX_EXEC_INST overwritten microwords), load @micro_inst at address 0,
 * run it to completion, optionally return the final PC via @endpc, then
 * restore every saved register so the context is untouched.
 * NOTE(review): lines are elided in this chunk (braces, returns, some
 * split argument lines) — code below is kept byte-identical.
 */
824 static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
825 unsigned char ae, unsigned char ctx,
826 uint64_t *micro_inst, unsigned int inst_num,
827 int code_off, unsigned int max_cycle,
830 uint64_t savuwords[MAX_EXEC_INST];
831 unsigned int ind_lm_addr0, ind_lm_addr1;
832 unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
833 unsigned int ind_cnt_sig;
834 unsigned int ind_sig, act_sig;
835 unsigned int csr_val = 0, newcsr_val;
837 unsigned int savcc, wakeup_events, savpc;
838 unsigned int ctxarb_ctl, ctx_enables;
840 if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
841 pr_err("QAT: invalid instruction num %d\n", inst_num);
844 /* save current context */
845 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
846 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
847 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
849 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
/* only snapshot the micro-store when the program fits the save buffer */
851 if (inst_num <= MAX_EXEC_INST)
852 qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
853 qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
854 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
855 savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
856 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
857 ctx_enables &= IGNORE_W1C_MASK;
858 qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
859 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
860 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
861 qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
863 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
864 qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
865 /* execute micro codes */
866 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
867 qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
868 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
869 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
871 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
872 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
873 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
874 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
875 qat_hal_enable_ctx(handle, ae, (1 << ctx));
876 /* wait for micro codes to finish */
877 if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
880 unsigned int ctx_status;
882 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
884 *endpc = ctx_status & handle->hal_handle->upc_mask;
886 /* retore to saved context */
887 qat_hal_disable_ctx(handle, ae, (1 << ctx));
888 if (inst_num <= MAX_EXEC_INST)
889 qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
890 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
891 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
892 handle->hal_handle->upc_mask & savpc);
893 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
894 newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
895 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
896 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
897 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
898 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
899 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
900 LM_ADDR_0_INDIRECT, ind_lm_addr0);
901 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
902 LM_ADDR_1_INDIRECT, ind_lm_addr1);
903 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
904 INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
905 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
906 INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
907 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
908 FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
909 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
910 CTX_SIG_EVENTS_INDIRECT, ind_sig);
911 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
912 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
/*
 * Read a context-relative register by synthesising a single ALU
 * microword that moves the register to ALU_OUT, executing it at
 * micro-store address 0 (after saving the word there), and reading the
 * ALU_OUT CSR. The saved microword, active context, arbiter and
 * CTX_ENABLES are restored before returning.
 * NOTE(review): lines are elided in this chunk (braces, the regtype
 * switch labels around the two 'insts =' encodings, uaddr setup and
 * returns) — code below is kept byte-identical.
 */
917 static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
918 unsigned char ae, unsigned char ctx,
919 enum icp_qat_uof_regtype reg_type,
920 unsigned short reg_num, unsigned int *data)
922 unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
923 unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
924 unsigned short reg_addr;
926 uint64_t insts, savuword;
928 reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
929 if (reg_addr == BAD_REGADDR) {
930 pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
/* two encodings: register in the source-A vs source-B field */
935 insts = 0xA070000000ull | (reg_addr & 0x3ff);
938 insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
941 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
942 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
943 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
944 ctx_enables &= IGNORE_W1C_MASK;
945 if (ctx != (savctx & ACS_ACNO))
946 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
948 qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
949 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
950 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
952 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
953 insts = qat_hal_set_uword_ecc(insts);
954 uwrd_lo = (unsigned int)(insts & 0xffffffff);
955 uwrd_hi = (unsigned int)(insts >> 0x20);
956 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
957 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
958 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
959 /* delay for at least 8 cycles */
960 qat_hal_wait_cycles(handle, ae, 0x8, 0);
963 * the instruction should have been executed
964 * prior to clearing the ECS in putUwords
966 qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
967 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
968 qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
969 if (ctx != (savctx & ACS_ACNO))
970 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
972 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
973 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
/*
 * Write a 32-bit value into a context-relative register by building two
 * immed microwords (one per 16-bit half) and executing them via
 * qat_hal_exec_micro_inst(). The source/destination field order in the
 * patched words depends on the register type (the branch split is in
 * lines elided from this chunk).
 * NOTE(review): lines are elided (braces, the 'insts' array initialiser,
 * returns and the switch/if around the two encodings) — code below is
 * kept byte-identical.
 */
978 static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
979 unsigned char ae, unsigned char ctx,
980 enum icp_qat_uof_regtype reg_type,
981 unsigned short reg_num, unsigned int data)
983 unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
990 const int num_inst = ARRAY_SIZE(insts), code_off = 1;
991 const int imm_w1 = 0, imm_w0 = 1;
993 dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
994 if (dest_addr == BAD_REGADDR) {
995 pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
/* split the value and resolve the two immediate source addresses */
999 data16lo = 0xffff & data;
1000 data16hi = 0xffff & (data >> 0x10);
1001 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
1003 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
/* encoding 1: source in bits 10-19, destination in bits 0-9 */
1007 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1008 ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1009 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1010 ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
/* encoding 2: destination in bits 10-19, source in bits 0-9 */
1013 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1014 ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1016 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1017 ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1021 return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
1022 code_off, num_inst * 0x5, NULL);
1025 int qat_hal_get_ins_num(void)
1027 return ARRAY_SIZE(inst_4b);
/*
 * Append a copy of the inst_4b stub to @micro_inst at index @inst_num
 * and patch its immediates so the generated code writes @value[0] to
 * local-memory address @addr. Returns the number of microwords added.
 * NOTE(review): lines are elided in this chunk (declarations of i,
 * orig_num, val_indx, usize, fixup_offset, the size switch and the
 * inst_arr assignment); the fixup_offset increments between the four
 * INSERT_IMMED_* patches are also elided. Code below is kept
 * byte-identical.
 */
1030 static int qat_hal_concat_micro_code(uint64_t *micro_inst,
1031 unsigned int inst_num, unsigned int size,
1032 unsigned int addr, unsigned int *value)
1035 unsigned int cur_value;
1036 const uint64_t *inst_arr;
1041 orig_num = inst_num;
1043 cur_value = value[val_indx++];
1045 usize = ARRAY_SIZE(inst_4b);
1046 fixup_offset = inst_num;
1047 for (i = 0; i < usize; i++)
1048 micro_inst[inst_num++] = inst_arr[i];
1049 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1051 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1053 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1055 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1057 return inst_num - orig_num;
/*
 * qat_hal_exec_micro_init_lm() - run a local-memory init microcode snippet
 * on context @ctx of AE @ae, preserving the GP registers it clobbers.
 *
 * GPA0-GPA2 and GPB0-GPB1 of the context are saved before execution and
 * restored afterwards, so the snippet may freely use them as scratch.
 * @pfirst_exec presumably gates a first-run-only step — the code using it
 * is elided from this listing; confirm against the full source.
 *
 * Returns the status of qat_hal_exec_micro_inst().
 */
1060 static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
1061 unsigned char ae, unsigned char ctx,
1062 int *pfirst_exec, uint64_t *micro_inst,
1063 unsigned int inst_num)
1066 unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
1067 unsigned int gprb0 = 0, gprb1 = 0;
/* save the GP registers the microcode will overwrite */
1070 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
1071 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
1072 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
1073 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
1074 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
/* execute; delay budget is 5 cycles per instruction */
1077 stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
1078 inst_num * 0x5, NULL);
/* restore the saved registers regardless of the visible outcome */
1081 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
1082 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
1083 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
1084 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
1085 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
/*
 * qat_hal_batch_wr_lm() - initialize AE local memory from a linked list of
 * batch-init records.
 *
 * Walks the list hanging off @lm_init_header, concatenating one microcode
 * template per record (qat_hal_concat_micro_code), appends a terminating
 * microword and executes the whole buffer once on context 0 via
 * qat_hal_exec_micro_init_lm(). The instruction buffer is heap-allocated
 * and freed before return.
 *
 * NOTE(review): elided listing — the loop construct around the list walk,
 * the NULL-header early return and the final return value are not shown.
 */
1090 int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1092 struct icp_qat_uof_batch_init *lm_init_header)
1094 struct icp_qat_uof_batch_init *plm_init;
1095 uint64_t *micro_inst_arry;
1097 int alloc_inst_size;
1101 plm_init = lm_init_header->next;
/* cap the buffer at the microstore capacity — the code cannot exceed it */
1102 alloc_inst_size = lm_init_header->size;
1103 if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1104 alloc_inst_size = handle->hal_handle->max_ustore;
1105 micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
1107 if (!micro_inst_arry)
/* concatenate one patched template per batch-init record */
1111 unsigned int addr, *value, size;
1114 addr = plm_init->addr;
1115 value = plm_init->value;
1116 size = plm_init->size;
1117 micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
1120 plm_init = plm_init->next;
1122 /* exec micro codes */
1123 if (micro_inst_arry && (micro_inst_num > 0)) {
/*
 * Terminating microword — presumably halts/parks the context after
 * the init sequence; confirm against the AE instruction encoding.
 */
1124 micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1125 stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
1129 kfree(micro_inst_arry);
/*
 * qat_hal_put_rel_rd_xfer() - write @val into a context-relative read
 * transfer register via the indirect CSR interface (SET_AE_XFER), without
 * executing microcode.
 *
 * The context-enables CSR is read first: in 4-context mode
 * (CE_INUSE_CONTEXTS set) only a subset of contexts is valid and the
 * per-context register window differs — the mask assignments themselves
 * are elided from this listing. @reg_num is range-checked against that
 * mask before the transfer-register address is formed.
 *
 * NOTE(review): the two SET_AE_XFER calls sit in elided branches —
 * presumably selecting between the SR bank and the DR bank (dr_offset
 * 0x10 apart); confirm against the full source.
 */
1133 static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1134 unsigned char ae, unsigned char ctx,
1135 enum icp_qat_uof_regtype reg_type,
1136 unsigned short reg_num, unsigned int val)
1139 unsigned int reg_addr;
1140 unsigned int ctx_enables;
1141 unsigned short mask;
1142 unsigned short dr_offset = 0x10;
1144 status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
/* in 4-context mode only even contexts exist; reject bad ctx values */
1145 if (CE_INUSE_CONTEXTS & ctx_enables) {
1147 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
/* register number must fit in the per-context window */
1155 if (reg_num & ~mask)
/* each context's transfer-register window is 0x20 registers wide */
1157 reg_addr = reg_num + (ctx << 0x5);
1161 SET_AE_XFER(handle, ae, reg_addr, val);
1165 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
/*
 * qat_hal_put_rel_wr_xfer() - write @data into a context-relative write
 * transfer register by executing a small microcode snippet on the AE.
 *
 * Write-transfer registers cannot be poked directly, so the routine:
 *   1. saves GPB register 0 of the context (borrowed as scratch),
 *   2. patches a three-microword snippet: two immediate moves that build
 *      the 32-bit value in the GPR, then a GPR -> transfer-register move,
 *   3. executes the snippet via qat_hal_exec_micro_inst(),
 *   4. restores the saved GPB value.
 *
 * Fails for an invalid ctx in 4-context mode, an out-of-range @reg_num,
 * or an unresolvable transfer-register address.
 *
 * NOTE(review): elided listing — the micro_inst[] initializer, the branch
 * keywords around the reg_mask selection and the error returns are not
 * shown; the statement order here is load-bearing, do not restructure.
 */
1174 static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1175 unsigned char ae, unsigned char ctx,
1176 enum icp_qat_uof_regtype reg_type,
1177 unsigned short reg_num, unsigned int data)
1179 unsigned int gprval, ctx_enables;
1180 unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
1182 unsigned short reg_mask;
1184 uint64_t micro_inst[] = {
1191 const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
1192 const unsigned short gprnum = 0, dly = num_inst * 0x5;
1194 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
/* in 4-context mode only even contexts exist; reject bad ctx values */
1195 if (CE_INUSE_CONTEXTS & ctx_enables) {
1197 pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
/* 32 registers per context in 4-ctx mode, 16 in 8-ctx mode */
1200 reg_mask = (unsigned short)~0x1f;
1202 reg_mask = (unsigned short)~0xf;
1204 if (reg_num & reg_mask)
1206 xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1207 if (xfr_addr == BAD_REGADDR) {
1208 pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
/* save the GPB scratch register so it can be restored afterwards */
1211 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
1212 gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
/* split the payload; low bytes are encoded as ICP_NO_DEST source addrs */
1213 data16low = 0xffff & data;
1214 data16hi = 0xffff & (data >> 0x10);
1215 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1216 (unsigned short)(0xff & data16hi));
1217 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1218 (unsigned short)(0xff & data16low));
/* words 0/1: build the 32-bit value in the GPR (hi half, then lo half) */
1219 micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
1220 ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1221 micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
1222 ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
/* word 2: move the GPR into the target write-transfer register */
1223 micro_inst[0x2] = micro_inst[0x2] |
1224 ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
1225 status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
1226 code_off, dly, NULL);
/* restore the borrowed GPB register */
1227 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
/*
 * qat_hal_put_rel_nn() - write @val into neighbour-ring register @nn of
 * context @ctx.
 *
 * Temporarily sets CE_NN_MODE in CTX_ENABLES so the neighbour registers
 * are writable through the write-transfer path, performs the write via
 * qat_hal_put_rel_wr_xfer(ICP_NEIGH_REL), then restores the original
 * CTX_ENABLES value. Returns the status of the transfer write.
 */
1231 static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1232 unsigned char ae, unsigned char ctx,
1233 unsigned short nn, unsigned int val)
1235 unsigned int ctx_enables;
1238 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
/* mask write-1-to-clear bits so the read-back value can be rewritten safely */
1239 ctx_enables &= IGNORE_W1C_MASK;
1240 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1242 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
/* restore the pre-call context-enables state */
1243 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
/*
 * qat_hal_convert_abs_to_rel() - convert an absolute register number into
 * a (context-relative register, context) pair, according to the AE's
 * current context mode.
 *
 * In 4-context mode (CE_INUSE_CONTEXTS set) each context owns 32
 * registers and only even context numbers are produced; in 8-context mode
 * each context owns 16 registers across contexts 0-7.
 */
1247 static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
1248 *handle, unsigned char ae,
1249 unsigned short absreg_num,
1250 unsigned short *relreg,
1253 unsigned int ctx_enables;
1255 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1256 if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode: 32 regs per context, contexts 0/2/4/6 */
1258 *relreg = absreg_num & 0x1F;
1259 *ctx = (absreg_num >> 0x4) & 0x6;
/* 8-ctx mode: 16 regs per context, contexts 0-7 */
1262 *relreg = absreg_num & 0x0F;
1263 *ctx = (absreg_num >> 0x4) & 0x7;
/*
 * qat_hal_init_gpr() - initialize a GPR to @regdata on AE @ae for every
 * context selected by @ctx_mask.
 *
 * A zero @ctx_mask means @reg_num is an absolute register number: it is
 * converted to a (relative reg, ctx) pair and the regtype is mapped to its
 * relative counterpart (type = reg_type - 1 — presumably ABS -> REL enum
 * adjacency; confirm against the regtype enum). Otherwise each context
 * whose bit is set in @ctx_mask gets the same relative write.
 *
 * NOTE(review): elided listing — the loop opening, else-branch and returns
 * are not shown. On line "1283" below, the character sequence before the
 * comma is a mojibake: '&reg,' (address of the local 'reg') was mangled
 * into the '®' glyph by the extraction (HTML entity &reg;). Restore it
 * when the file is reconstructed.
 */
1268 int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1269 unsigned char ae, unsigned char ctx_mask,
1270 enum icp_qat_uof_regtype reg_type,
1271 unsigned short reg_num, unsigned int regdata)
1275 unsigned char ctx = 0;
1276 enum icp_qat_uof_regtype type;
/* reject out-of-range GPR numbers up front */
1278 if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1282 if (ctx_mask == 0) {
1283 qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
1285 type = reg_type - 1;
/* skip contexts not selected by the mask */
1289 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1292 stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1294 pr_err("QAT: write gpr fail\n");
1297 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
/*
 * qat_hal_init_wr_xfer() - initialize a write-transfer register to
 * @regdata on AE @ae for every context selected by @ctx_mask.
 *
 * Mirrors qat_hal_init_gpr(): a zero @ctx_mask treats @reg_num as
 * absolute and converts it (here type = reg_type - 3 — presumably the
 * ABS -> REL offset for transfer regtypes; confirm against the enum),
 * otherwise each masked context receives the same relative write via
 * qat_hal_put_rel_wr_xfer().
 *
 * NOTE(review): elided listing — loop opening, else-branch and returns are
 * not shown. On line "1317" below, '®' is a mojibake of '&reg,'
 * (extraction artifact, HTML entity &reg;); restore on reconstruction.
 */
1302 int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1303 unsigned char ae, unsigned char ctx_mask,
1304 enum icp_qat_uof_regtype reg_type,
1305 unsigned short reg_num, unsigned int regdata)
1309 unsigned char ctx = 0;
1310 enum icp_qat_uof_regtype type;
/* reject out-of-range transfer-register numbers up front */
1312 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1316 if (ctx_mask == 0) {
1317 qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
1319 type = reg_type - 3;
/* skip contexts not selected by the mask */
1323 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1326 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
1329 pr_err("QAT: write wr xfer fail\n");
1332 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
/*
 * qat_hal_init_rd_xfer() - initialize a read-transfer register to
 * @regdata on AE @ae for every context selected by @ctx_mask.
 *
 * Same shape as qat_hal_init_wr_xfer(), but the write goes through the
 * direct CSR path qat_hal_put_rel_rd_xfer() instead of microcode
 * execution (read-transfer registers are CSR-writable).
 *
 * NOTE(review): elided listing — loop opening, else-branch and returns are
 * not shown. On line "1352" below, '®' is a mojibake of '&reg,'
 * (extraction artifact, HTML entity &reg;); restore on reconstruction.
 */
1337 int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1338 unsigned char ae, unsigned char ctx_mask,
1339 enum icp_qat_uof_regtype reg_type,
1340 unsigned short reg_num, unsigned int regdata)
1344 unsigned char ctx = 0;
1345 enum icp_qat_uof_regtype type;
/* reject out-of-range transfer-register numbers up front */
1347 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1351 if (ctx_mask == 0) {
1352 qat_hal_convert_abs_to_rel(handle, ae, reg_num, ®,
1354 type = reg_type - 3;
/* skip contexts not selected by the mask */
1358 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1361 stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
1364 pr_err("QAT: write rd xfer fail\n");
1367 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1372 int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1373 unsigned char ae, unsigned char ctx_mask,
1374 unsigned short reg_num, unsigned int regdata)
1382 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
1383 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1385 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1387 pr_err("QAT: write neigh error\n");