Linux-libre 4.14.12-gnu
[librecmc/linux-libre.git] / drivers / infiniband / hw / i40iw / i40iw_ctrl.c
1 /*******************************************************************************
2 *
3 * Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
10 *
11 *   Redistribution and use in source and binary forms, with or
12 *   without modification, are permitted provided that the following
13 *   conditions are met:
14 *
15 *    - Redistributions of source code must retain the above
16 *       copyright notice, this list of conditions and the following
17 *       disclaimer.
18 *
19 *    - Redistributions in binary form must reproduce the above
20 *       copyright notice, this list of conditions and the following
21 *       disclaimer in the documentation and/or other materials
22 *       provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 *******************************************************************************/
34
35 #include "i40iw_osdep.h"
36 #include "i40iw_register.h"
37 #include "i40iw_status.h"
38 #include "i40iw_hmc.h"
39
40 #include "i40iw_d.h"
41 #include "i40iw_type.h"
42 #include "i40iw_p.h"
43 #include "i40iw_vf.h"
44 #include "i40iw_virtchnl.h"
45
46 /**
47  * i40iw_insert_wqe_hdr - write wqe header
48  * @wqe: cqp wqe for header
49  * @header: header for the cqp wqe
50  */
void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
        /*
         * The header qword (offset 24) carries the WQE valid/polarity bit,
         * so every other field of the WQE must be globally visible before
         * it is written; otherwise hardware could consume a half-built WQE.
         */
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
}
56
57 void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
58 {
59         if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
60                 cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
61                 cqp_timeout->count = 0;
62         } else {
63                 if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
64                         cqp_timeout->count++;
65         }
66 }
67
68 /**
69  * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70  * @cqp: struct for cqp hw
71  * @val: cqp tail register value
72  * @tail:wqtail register value
73  * @error: cqp processing err
74  */
75 static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
76                                           u32 *val,
77                                           u32 *tail,
78                                           u32 *error)
79 {
80         if (cqp->dev->is_pf) {
81                 *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
82                 *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
83                 *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
84         } else {
85                 *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
86                 *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
87                 *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
88         }
89 }
90
91 /**
92  * i40iw_cqp_poll_registers - poll cqp registers
93  * @cqp: struct for cqp hw
94  * @tail:wqtail register value
95  * @count: how many times to try for completion
96  */
97 static enum i40iw_status_code i40iw_cqp_poll_registers(
98                                                 struct i40iw_sc_cqp *cqp,
99                                                 u32 tail,
100                                                 u32 count)
101 {
102         u32 i = 0;
103         u32 newtail, error, val;
104
105         while (i < count) {
106                 i++;
107                 i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
108                 if (error) {
109                         error = (cqp->dev->is_pf) ?
110                                  i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
111                                  i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
112                         return I40IW_ERR_CQP_COMPL_ERROR;
113                 }
114                 if (newtail != tail) {
115                         /* SUCCESS */
116                         I40IW_RING_MOVE_TAIL(cqp->sq_ring);
117                         cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
118                         return 0;
119                 }
120                 udelay(I40IW_SLEEP_COUNT);
121         }
122         return I40IW_ERR_TIMEOUT;
123 }
124
125 /**
126  * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
127  * @buf: ptr to fpm commit buffer
128  * @info: ptr to i40iw_hmc_obj_info struct
129  * @sd: number of SDs for HMC objects
130  *
131  * parses fpm commit info and copy base value
132  * of hmc objects in hmc_info
133  */
134 static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
135                                 u64 *buf,
136                                 struct i40iw_hmc_obj_info *info,
137                                 u32 *sd)
138 {
139         u64 temp;
140         u64 size;
141         u64 base = 0;
142         u32 i, j;
143         u32 k = 0;
144
145         /* copy base values in obj_info */
146         for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
147                 if ((i == I40IW_HMC_IW_SRQ) ||
148                         (i == I40IW_HMC_IW_FSIMC) ||
149                         (i == I40IW_HMC_IW_FSIAV)) {
150                         info[i].base = 0;
151                         info[i].cnt = 0;
152                         continue;
153                 }
154                 get_64bit_val(buf, j, &temp);
155                 info[i].base = RS_64_1(temp, 32) * 512;
156                 if (info[i].base > base) {
157                         base = info[i].base;
158                         k = i;
159                 }
160                 if (i == I40IW_HMC_IW_APBVT_ENTRY) {
161                         info[i].cnt = 1;
162                         continue;
163                 }
164                 if (i == I40IW_HMC_IW_QP)
165                         info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
166                 else if (i == I40IW_HMC_IW_CQ)
167                         info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
168                 else
169                         info[i].cnt = (u32)(temp);
170         }
171         size = info[k].cnt * info[k].size + info[k].base;
172         if (size & 0x1FFFFF)
173                 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
174         else
175                 *sd = (u32)(size >> 21);
176
177         return 0;
178 }
179
180 /**
181  * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
182  * @buf: ptr to fpm query buffer
183  * @buf_idx: index into buf
184  * @info: ptr to i40iw_hmc_obj_info struct
185  * @rsrc_idx: resource index into info
186  *
187  * Decode a 64 bit value from fpm query buffer into max count and size
188  */
189 static u64 i40iw_sc_decode_fpm_query(u64 *buf,
190                                             u32 buf_idx,
191                                             struct i40iw_hmc_obj_info *obj_info,
192                                             u32 rsrc_idx)
193 {
194         u64 temp;
195         u32 size;
196
197         get_64bit_val(buf, buf_idx, &temp);
198         obj_info[rsrc_idx].max_cnt = (u32)temp;
199         size = (u32)RS_64_1(temp, 32);
200         obj_info[rsrc_idx].size = LS_64_1(1, size);
201
202         return temp;
203 }
204
205 /**
206  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
207  * @buf: ptr to fpm query buffer
208  * @info: ptr to i40iw_hmc_obj_info struct
209  * @hmc_fpm_misc: ptr to fpm data
210  *
211  * parses fpm query buffer and copy max_cnt and
212  * size value of hmc objects in hmc_info
213  */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                u64 *buf,
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
        struct i40iw_hmc_obj_info *obj_info;
        u64 temp;
        u32 size;
        u16 max_pe_sds;

        obj_info = hmc_info->hmc_obj;

        /* qword 0: first SD index and maximum number of PE SDs */
        get_64bit_val(buf, 0, &temp);
        hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
        max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

        /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
        if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
                max_pe_sds--;
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

        /* qword 8: QP count (masked field) and log2 of the QP object size */
        get_64bit_val(buf, 8, &temp);
        obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
        size = (u32)RS_64_1(temp, 32);
        obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);

        /* qword 16: CQ count (masked field) and log2 of the CQ object size */
        get_64bit_val(buf, 16, &temp);
        obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
        size = (u32)RS_64_1(temp, 32);
        obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);

        /* qwords 32 and 40: hash table entries and ARP cache entries */
        i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
        i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);

        /* APBVT is a single fixed-size bitmap, not reported by firmware */
        obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
        obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

        /* qwords 48 and 56: memory regions and XF entries */
        i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
        i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);

        /* qword 64: XF free list; a zero block size is invalid */
        get_64bit_val(buf, 64, &temp);
        obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_XFFL].size = 4;
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;

        /* qword 72: Q1 entries */
        i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);

        /* qword 80: Q1 free list; a zero block size is invalid */
        get_64bit_val(buf, 80, &temp);
        obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_Q1FL].size = 4;
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;

        /* qword 88: timer entries */
        i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);

        /* qword 112: PBLE entries (each entry is a fixed 8 bytes) */
        get_64bit_val(buf, 112, &temp);
        obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_PBLE].size = 8;

        /* qword 120: misc limits - max ceqs, hash multiplier, timer bucket */
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
        hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
        hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);

        return 0;
}
284
285 /**
286  * i40iw_fill_qos_list - Change all unknown qs handles to available ones
287  * @qs_list: list of qs_handles to be fixed with valid qs_handles
288  */
289 static void i40iw_fill_qos_list(u16 *qs_list)
290 {
291         u16 qshandle = qs_list[0];
292         int i;
293
294         for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
295                 if (qs_list[i] == QS_HANDLE_UNKNOWN)
296                         qs_list[i] = qshandle;
297                 else
298                         qshandle = qs_list[i];
299         }
300 }
301
302 /**
303  * i40iw_qp_from_entry - Given entry, get to the qp structure
304  * @entry: Points to list of qp structure
305  */
306 static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
307 {
308         if (!entry)
309                 return NULL;
310
311         return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
312 }
313
314 /**
315  * i40iw_get_qp - get the next qp from the list given current qp
316  * @head: Listhead of qp's
317  * @qp: current qp
318  */
319 static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
320 {
321         struct list_head *entry = NULL;
322         struct list_head *lastentry;
323
324         if (list_empty(head))
325                 return NULL;
326
327         if (!qp) {
328                 entry = head->next;
329         } else {
330                 lastentry = &qp->list;
331                 entry = (lastentry != head) ? lastentry->next : NULL;
332         }
333
334         return i40iw_qp_from_entry(entry);
335 }
336
337 /**
338  * i40iw_change_l2params - given the new l2 parameters, change all qp
339  * @vsi: pointer to the vsi structure
 340  * @l2params: New parameters from l2
341  */
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
{
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;

        vsi->mss = l2params->mss;

        /* replace unknown handles with the nearest preceding known one */
        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                /* NOTE(review): qs_handle_change is never reset to false, so
                 * once any priority differs, qps on all later priorities are
                 * also suspended - confirm this conservative behavior is
                 * intended
                 */
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                /* walk every qp queued on this priority */
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
                                i40iw_qp_suspend_resume(dev, qp, true);
                        }
                        qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                }
                spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
                vsi->qos[i].qs_handle = qs_handle;
        }
}
372
373 /**
374  * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
375  * @qp: qp to be removed from qos
376  */
377 static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
378 {
379         struct i40iw_sc_vsi *vsi = qp->vsi;
380         unsigned long flags;
381
382         if (!qp->on_qoslist)
383                 return;
384         spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
385         list_del(&qp->list);
386         spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
387 }
388
389 /**
 390  * i40iw_qp_add_qos - called during setctx for qp to be added to qos
391  * @qp: qp to be added to qos
392  */
393 void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
394 {
395         struct i40iw_sc_vsi *vsi = qp->vsi;
396         unsigned long flags;
397
398         if (qp->on_qoslist)
399                 return;
400         spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
401         qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
402         list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
403         qp->on_qoslist = true;
404         spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
405 }
406
407 /**
408  * i40iw_sc_pd_init - initialize sc pd struct
409  * @dev: sc device struct
410  * @pd: sc pd ptr
411  * @pd_id: pd_id for allocated pd
412  * @abi_ver: ABI version from user context, -1 if not valid
413  */
414 static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
415                              struct i40iw_sc_pd *pd,
416                              u16 pd_id,
417                              int abi_ver)
418 {
419         pd->size = sizeof(*pd);
420         pd->pd_id = pd_id;
421         pd->abi_ver = abi_ver;
422         pd->dev = dev;
423 }
424
425 /**
426  * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
427  * @wqsize: size of the wq (sq, rq, srq) to encoded_size
428  * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
429  */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
        /* cqp sq's hw coded value starts from 1 for size of 4
         * while it starts from 0 for qp' wq's
         */
        u8 encoded_size = cqpsq ? 1 : 0;

        /* count how many doublings above the minimum size of 4 remain:
         * equivalent to incrementing once per halving of (wqsize >> 2)
         */
        for (wqsize >>= 3; wqsize; wqsize >>= 1)
                encoded_size++;

        return encoded_size;
}
444
445 /**
446  * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
447  * @cqp: IWARP control queue pair pointer
448  * @info: IWARP control queue pair init info pointer
449  *
450  * Initializes the object and context buffers for a control Queue Pair.
451  */
452 static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
453                                                 struct i40iw_cqp_init_info *info)
454 {
455         u8 hw_sq_size;
456
457         if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
458             (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
459             ((info->sq_size & (info->sq_size - 1))))
460                 return I40IW_ERR_INVALID_SIZE;
461
462         hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
463         cqp->size = sizeof(*cqp);
464         cqp->sq_size = info->sq_size;
465         cqp->hw_sq_size = hw_sq_size;
466         cqp->sq_base = info->sq;
467         cqp->host_ctx = info->host_ctx;
468         cqp->sq_pa = info->sq_pa;
469         cqp->host_ctx_pa = info->host_ctx_pa;
470         cqp->dev = info->dev;
471         cqp->struct_ver = info->struct_ver;
472         cqp->scratch_array = info->scratch_array;
473         cqp->polarity = 0;
474         cqp->en_datacenter_tcp = info->en_datacenter_tcp;
475         cqp->enabled_vf_count = info->enabled_vf_count;
476         cqp->hmc_profile = info->hmc_profile;
477         info->dev->cqp = cqp;
478
479         I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
480         cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
481         cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
482
483         i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
484                     "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
485                     __func__, cqp->sq_size, cqp->hw_sq_size,
486                     cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
487         return 0;
488 }
489
490 /**
491  * i40iw_sc_cqp_create - create cqp during bringup
492  * @cqp: struct for cqp hw
493  * @maj_err: If error, major err number
494  * @min_err: If error, minor err number
495  */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
                                                  u16 *maj_err,
                                                  u16 *min_err)
{
        u64 temp;
        u32 cnt = 0, p1, p2, val = 0, err_code;
        enum i40iw_status_code ret_code;

        *maj_err = 0;
        *min_err = 0;

        /* DMA scratch buffer kept on the cqp for update-SD commands.
         * NOTE(review): a single 128-byte buffer is shared by all CQP
         * commands - confirm callers serialize commands that use it
         */
        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
                                          128,
                                          I40IW_SD_BUF_ALIGNMENT);

        if (ret_code)
                goto exit;

        /* build the cqp host context: sq size/version, sq PA, vf info,
         * and a pointer to this cqp (echoed back in ccq completions)
         */
        temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
               LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

        set_64bit_val(cqp->host_ctx, 0, temp);
        set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
        temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
               LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
        set_64bit_val(cqp->host_ctx, 16, temp);
        set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
        set_64bit_val(cqp->host_ctx, 32, 0);
        set_64bit_val(cqp->host_ctx, 40, 0);
        set_64bit_val(cqp->host_ctx, 48, 0);
        set_64bit_val(cqp->host_ctx, 56, 0);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
                        cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

        /* hand the context's physical address to the device, high then low */
        p1 = RS_32_1(cqp->host_ctx_pa, 32);
        p2 = (u32)cqp->host_ctx_pa;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
        }
        /* poll CCQPSTATUS until the device reports the cqp is ready */
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        /* timed out: release the buffer and report hw codes */
                        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
                        ret_code = I40IW_ERR_TIMEOUT;
                        /*
                         * read PFPE_CQPERRORCODES register to get the minor
                         * and major error code
                         */
                        if (cqp->dev->is_pf)
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
                        else
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
                        *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
                        goto exit;
                }
                udelay(I40IW_SLEEP_COUNT);
                if (cqp->dev->is_pf)
                        val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
                else
                        val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
        } while (!val);

exit:
        /* on success, SDs are programmed without the ccq at this stage */
        if (!ret_code)
                cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return ret_code;
}
570
571 /**
572  * i40iw_sc_cqp_post_sq - post of cqp's sq
573  * @cqp: struct for cqp hw
574  */
575 void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
576 {
577         if (cqp->dev->is_pf)
578                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
579         else
580                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
581
582         i40iw_debug(cqp->dev,
583                     I40IW_DEBUG_WQE,
584                     "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
585                     __func__,
586                     cqp->sq_ring.head,
587                     cqp->sq_ring.tail,
588                     cqp->sq_ring.size);
589 }
590
591 /**
592  * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
593  * @cqp: struct for cqp hw
 594  * @scratch: caller's scratch value saved with the wqe for completion matching
595  */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
        u64 *wqe = NULL;
        u32     wqe_idx;
        enum i40iw_status_code ret_code;

        /* refuse to hand out a wqe while the sq is full */
        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
                i40iw_debug(cqp->dev,
                            I40IW_DEBUG_WQE,
                            "%s: ring is full head %x tail %x size %x\n",
                            __func__,
                            cqp->sq_ring.head,
                            cqp->sq_ring.tail,
                            cqp->sq_ring.size);
                return NULL;
        }
        I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
        /* NOTE(review): the requested-commands counter is bumped even when
         * MOVE_HEAD fails just below - confirm the off-by-one is acceptable
         * for the stall accounting in i40iw_check_cqp_progress
         */
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
        /* wrapping back to index 0 flips the valid-bit polarity */
        if (!wqe_idx)
                cqp->polarity = !cqp->polarity;

        wqe = cqp->sq_base[wqe_idx].elem;
        /* save the caller's scratch so the completion can be matched up */
        cqp->scratch_array[wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);

        return wqe;
}
625
626 /**
627  * i40iw_sc_cqp_destroy - destroy cqp during close
628  * @cqp: struct for cqp hw
629  */
630 static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
631 {
632         u32 cnt = 0, val = 1;
633         enum i40iw_status_code ret_code = 0;
634         u32 cqpstat_addr;
635
636         if (cqp->dev->is_pf) {
637                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
638                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
639                 cqpstat_addr = I40E_PFPE_CCQPSTATUS;
640         } else {
641                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
642                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
643                 cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
644         }
645         do {
646                 if (cnt++ > I40IW_DONE_COUNT) {
647                         ret_code = I40IW_ERR_TIMEOUT;
648                         break;
649                 }
650                 udelay(I40IW_SLEEP_COUNT);
651                 val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
652         } while (val);
653
654         i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
655         return ret_code;
656 }
657
658 /**
659  * i40iw_sc_ccq_arm - enable intr for control cq
660  * @ccq: ccq sc struct
661  */
662 static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
663 {
664         u64 temp_val;
665         u16 sw_cq_sel;
666         u8 arm_next_se;
667         u8 arm_seq_num;
668
669         /* write to cq doorbell shadow area */
670         /* arm next se should always be zero */
671         get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
672
673         sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
674         arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
675
676         arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
677         arm_seq_num++;
678
679         temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
680                    LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
681                    LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
682                    LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
683
684         set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
685
686         wmb();       /* make sure shadow area is updated before arming */
687
688         if (ccq->dev->is_pf)
689                 i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
690         else
691                 i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
692 }
693
694 /**
695  * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
696  * @ccq: ccq sc struct
697  * @info: completion q entry to return
698  */
699 static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
700                                         struct i40iw_sc_cq *ccq,
701                                         struct i40iw_ccq_cqe_info *info)
702 {
703         u64 qp_ctx, temp, temp1;
704         u64 *cqe;
705         struct i40iw_sc_cqp *cqp;
706         u32 wqe_idx;
707         u8 polarity;
708         enum i40iw_status_code ret_code = 0;
709
710         if (ccq->cq_uk.avoid_mem_cflct)
711                 cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
712         else
713                 cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
714
715         get_64bit_val(cqe, 24, &temp);
716         polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
717         if (polarity != ccq->cq_uk.polarity)
718                 return I40IW_ERR_QUEUE_EMPTY;
719
720         get_64bit_val(cqe, 8, &qp_ctx);
721         cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
722         info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
723         info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
724         if (info->error) {
725                 info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
726                 info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
727         }
728         wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
729         info->scratch = cqp->scratch_array[wqe_idx];
730
731         get_64bit_val(cqe, 16, &temp1);
732         info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
733         get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
734         info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
735         info->cqp = cqp;
736
737         /*  move the head for cq */
738         I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
739         if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
740                 ccq->cq_uk.polarity ^= 1;
741
742         /* update cq tail in cq shadow memory also */
743         I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
744         set_64bit_val(ccq->cq_uk.shadow_area,
745                       0,
746                       I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
747         wmb(); /* write shadow area before tail */
748         I40IW_RING_MOVE_TAIL(cqp->sq_ring);
749         ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
750
751         return ret_code;
752 }
753
754 /**
755  * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
756  * @cqp: struct for cqp hw
757  * @op_code: cqp opcode for completion
758  * @info: completion q entry to return
759  */
760 static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
761                                         struct i40iw_sc_cqp *cqp,
762                                         u8 op_code,
763                                         struct i40iw_ccq_cqe_info *compl_info)
764 {
765         struct i40iw_ccq_cqe_info info;
766         struct i40iw_sc_cq *ccq;
767         enum i40iw_status_code ret_code = 0;
768         u32 cnt = 0;
769
770         memset(&info, 0, sizeof(info));
771         ccq = cqp->dev->ccq;
772         while (1) {
773                 if (cnt++ > I40IW_DONE_COUNT)
774                         return I40IW_ERR_TIMEOUT;
775
776                 if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
777                         udelay(I40IW_SLEEP_COUNT);
778                         continue;
779                 }
780
781                 if (info.error) {
782                         ret_code = I40IW_ERR_CQP_COMPL_ERROR;
783                         break;
784                 }
785                 /* check if opcode is cq create */
786                 if (op_code != info.op_code) {
787                         i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
788                                     "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
789                                     __func__, op_code, info.op_code);
790                 }
791                 /* success, exit out of the loop */
792                 if (op_code == info.op_code)
793                         break;
794         }
795
796         if (compl_info)
797                 memcpy(compl_info, &info, sizeof(*compl_info));
798
799         return ret_code;
800 }
801
802 /**
803  * i40iw_sc_manage_push_page - Handle push page
804  * @cqp: struct for cqp hw
805  * @info: push page info
806  * @scratch: u64 saved to be used during cqp completion
807  * @post_sq: flag for cqp db to ring
808  */
809 static enum i40iw_status_code i40iw_sc_manage_push_page(
810                                 struct i40iw_sc_cqp *cqp,
811                                 struct i40iw_cqp_manage_push_page_info *info,
812                                 u64 scratch,
813                                 bool post_sq)
814 {
815         u64 *wqe;
816         u64 header;
817
818         if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
819                 return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
820
821         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
822         if (!wqe)
823                 return I40IW_ERR_RING_FULL;
824
825         set_64bit_val(wqe, 16, info->qs_handle);
826
827         header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
828                  LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
829                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
830                  LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
831
832         i40iw_insert_wqe_hdr(wqe, header);
833
834         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
835                         wqe, I40IW_CQP_WQE_SIZE * 8);
836
837         if (post_sq)
838                 i40iw_sc_cqp_post_sq(cqp);
839         return 0;
840 }
841
842 /**
843  * i40iw_sc_manage_hmc_pm_func_table - manage of function table
844  * @cqp: struct for cqp hw
845  * @scratch: u64 saved to be used during cqp completion
846  * @vf_index: vf index for cqp
847  * @free_pm_fcn: function number
848  * @post_sq: flag for cqp db to ring
849  */
850 static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
851                                 struct i40iw_sc_cqp *cqp,
852                                 u64 scratch,
853                                 u8 vf_index,
854                                 bool free_pm_fcn,
855                                 bool post_sq)
856 {
857         u64 *wqe;
858         u64 header;
859
860         if (vf_index >= I40IW_MAX_VF_PER_PF)
861                 return I40IW_ERR_INVALID_VF_ID;
862         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
863         if (!wqe)
864                 return I40IW_ERR_RING_FULL;
865
866         header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
867                  LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
868                  LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
869                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
870
871         i40iw_insert_wqe_hdr(wqe, header);
872         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
873                         wqe, I40IW_CQP_WQE_SIZE * 8);
874         if (post_sq)
875                 i40iw_sc_cqp_post_sq(cqp);
876         return 0;
877 }
878
879 /**
880  * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
881  * @cqp: struct for cqp hw
882  * @scratch: u64 saved to be used during cqp completion
883  * @hmc_profile_type: type of profile to set
884  * @vf_num: vf number for profile
885  * @post_sq: flag for cqp db to ring
886  * @poll_registers: flag to poll register for cqp completion
887  */
888 static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
889                                 struct i40iw_sc_cqp *cqp,
890                                 u64 scratch,
891                                 u8 hmc_profile_type,
892                                 u8 vf_num, bool post_sq,
893                                 bool poll_registers)
894 {
895         u64 *wqe;
896         u64 header;
897         u32 val, tail, error;
898         enum i40iw_status_code ret_code = 0;
899
900         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
901         if (!wqe)
902                 return I40IW_ERR_RING_FULL;
903
904         set_64bit_val(wqe, 16,
905                       (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
906                                 LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
907
908         header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
909                        LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
910
911         i40iw_insert_wqe_hdr(wqe, header);
912
913         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
914                         wqe, I40IW_CQP_WQE_SIZE * 8);
915
916         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
917         if (error)
918                 return I40IW_ERR_CQP_COMPL_ERROR;
919
920         if (post_sq) {
921                 i40iw_sc_cqp_post_sq(cqp);
922                 if (poll_registers)
923                         ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
924                 else
925                         ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
926                                                                  I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
927                                                                  NULL);
928         }
929
930         return ret_code;
931 }
932
933 /**
934  * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
935  * @cqp: struct for cqp hw
936  */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
	/* busy-wait on the ccq for the MANAGE_HMC_PM_FUNC_TABLE completion */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}
941
942 /**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
944  * @cqp: struct for cqp hw
945  */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	/* busy-wait on the ccq for the COMMIT_FPM_VALUES completion */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}
950
951 /**
952  * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
953  * @cqp: struct for cqp hw
954  * @scratch: u64 saved to be used during cqp completion
955  * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: memory for fpm values
957  * @post_sq: flag for cqp db to ring
958  * @wait_type: poll ccq or cqp registers for cqp completion
959  */
960 static enum i40iw_status_code i40iw_sc_commit_fpm_values(
961                                         struct i40iw_sc_cqp *cqp,
962                                         u64 scratch,
963                                         u8 hmc_fn_id,
964                                         struct i40iw_dma_mem *commit_fpm_mem,
965                                         bool post_sq,
966                                         u8 wait_type)
967 {
968         u64 *wqe;
969         u64 header;
970         u32 tail, val, error;
971         enum i40iw_status_code ret_code = 0;
972
973         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
974         if (!wqe)
975                 return I40IW_ERR_RING_FULL;
976
977         set_64bit_val(wqe, 16, hmc_fn_id);
978         set_64bit_val(wqe, 32, commit_fpm_mem->pa);
979
980         header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
981                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
982
983         i40iw_insert_wqe_hdr(wqe, header);
984
985         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
986                         wqe, I40IW_CQP_WQE_SIZE * 8);
987
988         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
989         if (error)
990                 return I40IW_ERR_CQP_COMPL_ERROR;
991
992         if (post_sq) {
993                 i40iw_sc_cqp_post_sq(cqp);
994
995                 if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
996                         ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
997                 else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
998                         ret_code = i40iw_sc_commit_fpm_values_done(cqp);
999         }
1000
1001         return ret_code;
1002 }
1003
1004 /**
1005  * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1006  * @cqp: struct for cqp hw
1007  */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	/* busy-wait on the ccq for the QUERY_FPM_VALUES completion */
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}
1012
1013 /**
1014  * i40iw_sc_query_fpm_values - cqp wqe query fpm values
1015  * @cqp: struct for cqp hw
1016  * @scratch: u64 saved to be used during cqp completion
1017  * @hmc_fn_id: hmc function id
1018  * @query_fpm_mem: memory for return fpm values
1019  * @post_sq: flag for cqp db to ring
1020  * @wait_type: poll ccq or cqp registers for cqp completion
1021  */
1022 static enum i40iw_status_code i40iw_sc_query_fpm_values(
1023                                         struct i40iw_sc_cqp *cqp,
1024                                         u64 scratch,
1025                                         u8 hmc_fn_id,
1026                                         struct i40iw_dma_mem *query_fpm_mem,
1027                                         bool post_sq,
1028                                         u8 wait_type)
1029 {
1030         u64 *wqe;
1031         u64 header;
1032         u32 tail, val, error;
1033         enum i40iw_status_code ret_code = 0;
1034
1035         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1036         if (!wqe)
1037                 return I40IW_ERR_RING_FULL;
1038
1039         set_64bit_val(wqe, 16, hmc_fn_id);
1040         set_64bit_val(wqe, 32, query_fpm_mem->pa);
1041
1042         header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
1043                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1044
1045         i40iw_insert_wqe_hdr(wqe, header);
1046
1047         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
1048                         wqe, I40IW_CQP_WQE_SIZE * 8);
1049
1050         /* read the tail from CQP_TAIL register */
1051         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1052
1053         if (error)
1054                 return I40IW_ERR_CQP_COMPL_ERROR;
1055
1056         if (post_sq) {
1057                 i40iw_sc_cqp_post_sq(cqp);
1058                 if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
1059                         ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
1060                 else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
1061                         ret_code = i40iw_sc_query_fpm_values_done(cqp);
1062         }
1063
1064         return ret_code;
1065 }
1066
1067 /**
1068  * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
1069  * @cqp: struct for cqp hw
1070  * @info: arp entry information
1071  * @scratch: u64 saved to be used during cqp completion
1072  * @post_sq: flag for cqp db to ring
1073  */
1074 static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
1075                                 struct i40iw_sc_cqp *cqp,
1076                                 struct i40iw_add_arp_cache_entry_info *info,
1077                                 u64 scratch,
1078                                 bool post_sq)
1079 {
1080         u64 *wqe;
1081         u64 temp, header;
1082
1083         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1084         if (!wqe)
1085                 return I40IW_ERR_RING_FULL;
1086         set_64bit_val(wqe, 8, info->reach_max);
1087
1088         temp = info->mac_addr[5] |
1089                LS_64_1(info->mac_addr[4], 8) |
1090                LS_64_1(info->mac_addr[3], 16) |
1091                LS_64_1(info->mac_addr[2], 24) |
1092                LS_64_1(info->mac_addr[1], 32) |
1093                LS_64_1(info->mac_addr[0], 40);
1094
1095         set_64bit_val(wqe, 16, temp);
1096
1097         header = info->arp_index |
1098                  LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1099                  LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
1100                  LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
1101                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1102
1103         i40iw_insert_wqe_hdr(wqe, header);
1104
1105         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
1106                         wqe, I40IW_CQP_WQE_SIZE * 8);
1107
1108         if (post_sq)
1109                 i40iw_sc_cqp_post_sq(cqp);
1110         return 0;
1111 }
1112
1113 /**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
1115  * @cqp: struct for cqp hw
1116  * @scratch: u64 saved to be used during cqp completion
1117  * @arp_index: arp index to delete arp entry
1118  * @post_sq: flag for cqp db to ring
1119  */
1120 static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
1121                                         struct i40iw_sc_cqp *cqp,
1122                                         u64 scratch,
1123                                         u16 arp_index,
1124                                         bool post_sq)
1125 {
1126         u64 *wqe;
1127         u64 header;
1128
1129         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1130         if (!wqe)
1131                 return I40IW_ERR_RING_FULL;
1132
1133         header = arp_index |
1134                  LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1135                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1136         i40iw_insert_wqe_hdr(wqe, header);
1137
1138         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
1139                         wqe, I40IW_CQP_WQE_SIZE * 8);
1140
1141         if (post_sq)
1142                 i40iw_sc_cqp_post_sq(cqp);
1143         return 0;
1144 }
1145
1146 /**
1147  * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
1148  * @cqp: struct for cqp hw
1149  * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to query arp entry
1151  * @post_sq: flag for cqp db to ring
1152  */
1153 static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
1154                                 struct i40iw_sc_cqp *cqp,
1155                                 u64 scratch,
1156                                 u16 arp_index,
1157                                 bool post_sq)
1158 {
1159         u64 *wqe;
1160         u64 header;
1161
1162         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1163         if (!wqe)
1164                 return I40IW_ERR_RING_FULL;
1165
1166         header = arp_index |
1167                  LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1168                  LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
1169                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1170
1171         i40iw_insert_wqe_hdr(wqe, header);
1172
1173         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
1174                         wqe, I40IW_CQP_WQE_SIZE * 8);
1175
1176         if (post_sq)
1177                 i40iw_sc_cqp_post_sq(cqp);
1178         return 0;
1179 }
1180
1181 /**
1182  * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
1183  * @cqp: struct for cqp hw
1184  * @info: info for apbvt entry to add or delete
1185  * @scratch: u64 saved to be used during cqp completion
1186  * @post_sq: flag for cqp db to ring
1187  */
1188 static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
1189                                 struct i40iw_sc_cqp *cqp,
1190                                 struct i40iw_apbvt_info *info,
1191                                 u64 scratch,
1192                                 bool post_sq)
1193 {
1194         u64 *wqe;
1195         u64 header;
1196
1197         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1198         if (!wqe)
1199                 return I40IW_ERR_RING_FULL;
1200
1201         set_64bit_val(wqe, 16, info->port);
1202
1203         header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
1204                  LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
1205                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1206
1207         i40iw_insert_wqe_hdr(wqe, header);
1208
1209         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
1210                         wqe, I40IW_CQP_WQE_SIZE * 8);
1211
1212         if (post_sq)
1213                 i40iw_sc_cqp_post_sq(cqp);
1214         return 0;
1215 }
1216
1217 /**
1218  * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
1219  * @cqp: struct for cqp hw
1220  * @info: info for quad hash to manage
1221  * @scratch: u64 saved to be used during cqp completion
1222  * @post_sq: flag for cqp db to ring
1223  *
1224  * This is called before connection establishment is started. For passive connections, when
1225  * listener is created, it will call with entry type of  I40IW_QHASH_TYPE_TCP_SYN with local
1226  * ip address and tcp port. When SYN is received (passive connections) or
1227  * sent (active connections), this routine is called with entry type of
1228  * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
1229  *
1230  * When iwarp connection is done and its state moves to RTS, the quad hash entry in
1231  * the hardware will point to iwarp's qp number and requires no calls from the driver.
1232  */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
					struct i40iw_sc_cqp *cqp,
					struct i40iw_qhash_table_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	struct i40iw_sc_vsi *vsi = info->vsi;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* mac address packed with byte 0 in the most significant bits */
	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	/* destination address: a v4 address occupies only ADDR3; a v6
	 * address fills all four ADDR words across offsets 48 and 56
	 */
	if (info->ipv4_valid) {
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	/* qos handle for the entry's user priority, plus optional vlan */
	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	/* source port/ip are only written for established (full quad)
	 * entries; SYN entries identify only the local ip and port
	 */
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	/* header written last - it carries the wqe valid bit */
	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1314
1315 /**
1316  * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1317  * @cqp: struct for cqp hw
1318  * @scratch: u64 saved to be used during cqp completion
1319  * @post_sq: flag for cqp db to ring
1320  */
1321 static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
1322                                         struct i40iw_sc_cqp *cqp,
1323                                         u64 scratch,
1324                                         bool post_sq)
1325 {
1326         u64 *wqe;
1327         u64 header;
1328
1329         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1330         if (!wqe)
1331                 return I40IW_ERR_RING_FULL;
1332         header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
1333                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1334
1335         i40iw_insert_wqe_hdr(wqe, header);
1336         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1337                         wqe, I40IW_CQP_WQE_SIZE * 8);
1338         if (post_sq)
1339                 i40iw_sc_cqp_post_sq(cqp);
1340         return 0;
1341 }
1342
1343 /**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
1345  * @cqp: struct for cqp hw
 * @info: mac addr info
1347  * @scratch: u64 saved to be used during cqp completion
1348  * @post_sq: flag for cqp db to ring
1349  */
1350 static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
1351                                 struct i40iw_sc_cqp *cqp,
1352                                 struct i40iw_local_mac_ipaddr_entry_info *info,
1353                                 u64 scratch,
1354                                 bool post_sq)
1355 {
1356         u64 *wqe;
1357         u64 temp, header;
1358
1359         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1360         if (!wqe)
1361                 return I40IW_ERR_RING_FULL;
1362         temp = info->mac_addr[5] |
1363                 LS_64_1(info->mac_addr[4], 8) |
1364                 LS_64_1(info->mac_addr[3], 16) |
1365                 LS_64_1(info->mac_addr[2], 24) |
1366                 LS_64_1(info->mac_addr[1], 32) |
1367                 LS_64_1(info->mac_addr[0], 40);
1368
1369         set_64bit_val(wqe, 32, temp);
1370
1371         header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1372                  LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1373                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1374
1375         i40iw_insert_wqe_hdr(wqe, header);
1376
1377         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
1378                         wqe, I40IW_CQP_WQE_SIZE * 8);
1379
1380         if (post_sq)
1381                 i40iw_sc_cqp_post_sq(cqp);
1382         return 0;
1383 }
1384
1385 /**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac
1387  * @cqp: struct for cqp hw
1388  * @scratch: u64 saved to be used during cqp completion
1389  * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address entry delete
1391  * @post_sq: flag for cqp db to ring
1392  */
1393 static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
1394                                 struct i40iw_sc_cqp *cqp,
1395                                 u64 scratch,
1396                                 u8 entry_idx,
1397                                 u8 ignore_ref_count,
1398                                 bool post_sq)
1399 {
1400         u64 *wqe;
1401         u64 header;
1402
1403         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1404         if (!wqe)
1405                 return I40IW_ERR_RING_FULL;
1406         header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1407                  LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1408                  LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
1409                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
1410                  LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
1411
1412         i40iw_insert_wqe_hdr(wqe, header);
1413
1414         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
1415                         wqe, I40IW_CQP_WQE_SIZE * 8);
1416
1417         if (post_sq)
1418                 i40iw_sc_cqp_post_sq(cqp);
1419         return 0;
1420 }
1421
1422 /**
1423  * i40iw_sc_cqp_nop - send a nop wqe
1424  * @cqp: struct for cqp hw
1425  * @scratch: u64 saved to be used during cqp completion
1426  * @post_sq: flag for cqp db to ring
1427  */
1428 static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
1429                                                u64 scratch,
1430                                                bool post_sq)
1431 {
1432         u64 *wqe;
1433         u64 header;
1434
1435         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1436         if (!wqe)
1437                 return I40IW_ERR_RING_FULL;
1438         header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
1439                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1440         i40iw_insert_wqe_hdr(wqe, header);
1441         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
1442                         wqe, I40IW_CQP_WQE_SIZE * 8);
1443
1444         if (post_sq)
1445                 i40iw_sc_cqp_post_sq(cqp);
1446         return 0;
1447 }
1448
1449 /**
1450  * i40iw_sc_ceq_init - initialize ceq
1451  * @ceq: ceq sc structure
1452  * @info: ceq initialization info
1453  */
1454 static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
1455                                                 struct i40iw_ceq_init_info *info)
1456 {
1457         u32 pble_obj_cnt;
1458
1459         if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
1460             (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
1461                 return I40IW_ERR_INVALID_SIZE;
1462
1463         if (info->ceq_id >= I40IW_MAX_CEQID)
1464                 return I40IW_ERR_INVALID_CEQ_ID;
1465
1466         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1467
1468         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1469                 return I40IW_ERR_INVALID_PBLE_INDEX;
1470
1471         ceq->size = sizeof(*ceq);
1472         ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
1473         ceq->ceq_id = info->ceq_id;
1474         ceq->dev = info->dev;
1475         ceq->elem_cnt = info->elem_cnt;
1476         ceq->ceq_elem_pa = info->ceqe_pa;
1477         ceq->virtual_map = info->virtual_map;
1478
1479         ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
1480         ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
1481         ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
1482
1483         ceq->tph_en = info->tph_en;
1484         ceq->tph_val = info->tph_val;
1485         ceq->polarity = 1;
1486         I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
1487         ceq->dev->ceq[info->ceq_id] = ceq;
1488
1489         return 0;
1490 }
1491
1492 /**
1493  * i40iw_sc_ceq_create - create ceq wqe
1494  * @ceq: ceq sc structure
1495  * @scratch: u64 saved to be used during cqp completion
1496  * @post_sq: flag for cqp db to ring
1497  */
1498 static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
1499                                                   u64 scratch,
1500                                                   bool post_sq)
1501 {
1502         struct i40iw_sc_cqp *cqp;
1503         u64 *wqe;
1504         u64 header;
1505
1506         cqp = ceq->dev->cqp;
1507         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1508         if (!wqe)
1509                 return I40IW_ERR_RING_FULL;
1510         set_64bit_val(wqe, 16, ceq->elem_cnt);
1511         set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
1512         set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
1513         set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
1514
1515         header = ceq->ceq_id |
1516                  LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
1517                  LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
1518                  LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
1519                  LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
1520                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1521
1522         i40iw_insert_wqe_hdr(wqe, header);
1523
1524         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
1525                         wqe, I40IW_CQP_WQE_SIZE * 8);
1526
1527         if (post_sq)
1528                 i40iw_sc_cqp_post_sq(cqp);
1529         return 0;
1530 }
1531
1532 /**
1533  * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1534  * @ceq: ceq sc structure
1535  */
1536 static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
1537 {
1538         struct i40iw_sc_cqp *cqp;
1539
1540         cqp = ceq->dev->cqp;
1541         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1542 }
1543
1544 /**
1545  * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1546  * @ceq: ceq sc structure
1547  */
static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	/* NOTE(review): switches sd updates to the no-ccq path before
	 * waiting - presumably because the ccq is being torn down along
	 * with the control ceq; confirm against teardown sequence
	 */
	cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
}
1556
1557 /**
1558  * i40iw_sc_cceq_create - create cceq
1559  * @ceq: ceq sc structure
1560  * @scratch: u64 saved to be used during cqp completion
1561  */
1562 static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
1563 {
1564         enum i40iw_status_code ret_code;
1565
1566         ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
1567         if (!ret_code)
1568                 ret_code = i40iw_sc_cceq_create_done(ceq);
1569         return ret_code;
1570 }
1571
/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that destroys the ceq and optionally
 * rings the cqp doorbell.  Returns I40IW_ERR_RING_FULL when no cqp
 * sq wqe is available.
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* describe the queue memory being released */
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1606
/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 *
 * Returns the cq the current ceq element refers to, or NULL when the
 * element's valid bit does not match the expected polarity (no new
 * completion).  Also acks the event to hardware via the CQACK register.
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	if (polarity != ceq->polarity)
		return cq;	/* nothing new; returns NULL */

	/* the ceqe stores the cq pointer shifted right by one bit */
	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	/* expected polarity flips each time the ring wraps */
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;

	/* ack the cq event; register differs between PF and VF */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}
1638
1639 /**
1640  * i40iw_sc_aeq_init - initialize aeq
1641  * @aeq: aeq structure ptr
1642  * @info: aeq initialization info
1643  */
1644 static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1645                                                 struct i40iw_aeq_init_info *info)
1646 {
1647         u32 pble_obj_cnt;
1648
1649         if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1650             (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1651                 return I40IW_ERR_INVALID_SIZE;
1652         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1653
1654         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1655                 return I40IW_ERR_INVALID_PBLE_INDEX;
1656
1657         aeq->size = sizeof(*aeq);
1658         aeq->polarity = 1;
1659         aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1660         aeq->dev = info->dev;
1661         aeq->elem_cnt = info->elem_cnt;
1662
1663         aeq->aeq_elem_pa = info->aeq_elem_pa;
1664         I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1665         info->dev->aeq = aeq;
1666
1667         aeq->virtual_map = info->virtual_map;
1668         aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1669         aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1670         aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1671         info->dev->aeq = aeq;
1672         return 0;
1673 }
1674
/**
 * i40iw_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that creates the asynchronous event
 * queue and optionally rings the cqp doorbell.  Returns
 * I40IW_ERR_RING_FULL when no cqp sq wqe is available.
 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
						  u64 scratch,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	/* physical address only when physically mapped ... */
	set_64bit_val(wqe, 32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	/* ... first pble index only when virtually mapped */
	set_64bit_val(wqe, 48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1711
/**
 * i40iw_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that destroys the aeq and optionally
 * rings the cqp doorbell.  Returns I40IW_ERR_RING_FULL when no cqp
 * sq wqe is available.
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
						   u64 scratch,
						   bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* describe the queue memory being released */
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1744
/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 *
 * Decodes the aeqe at the current ring tail into @info and consumes
 * it.  Returns I40IW_ERR_QUEUE_EMPTY when the entry's valid bit does
 * not match the expected polarity (no new event).
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	/* unpack the fields common to every event */
	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
	/* the ae source selects how compl_ctx and wqe_idx are interpreted
	 * and which qp/cq/sq flags apply to the event
	 */
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		/* cq events carry the context shifted right by one bit;
		 * shift left to recover it
		 */
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	default:
		break;
	}
	/* consume the entry; expected polarity flips on ring wrap */
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;
	return 0;
}
1818
1819 /**
1820  * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1821  * @dev: sc device struct
1822  * @count: allocate count
1823  */
1824 static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1825                                                           u32 count)
1826 {
1827         if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
1828                 return I40IW_ERR_INVALID_SIZE;
1829
1830         if (dev->is_pf)
1831                 i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1832         else
1833                 i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1834
1835         return 0;
1836 }
1837
1838 /**
1839  * i40iw_sc_aeq_create_done - create aeq
1840  * @aeq: aeq structure ptr
1841  */
1842 static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
1843 {
1844         struct i40iw_sc_cqp *cqp;
1845
1846         cqp = aeq->dev->cqp;
1847         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
1848 }
1849
1850 /**
1851  * i40iw_sc_aeq_destroy_done - destroy of aeq during close
1852  * @aeq: aeq structure ptr
1853  */
1854 static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
1855 {
1856         struct i40iw_sc_cqp *cqp;
1857
1858         cqp = aeq->dev->cqp;
1859         return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
1860 }
1861
1862 /**
1863  * i40iw_sc_ccq_init - initialize control cq
1864  * @cq: sc's cq ctruct
1865  * @info: info for control cq initialization
1866  */
1867 static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
1868                                                 struct i40iw_ccq_init_info *info)
1869 {
1870         u32 pble_obj_cnt;
1871
1872         if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
1873                 return I40IW_ERR_INVALID_SIZE;
1874
1875         if (info->ceq_id > I40IW_MAX_CEQID)
1876                 return I40IW_ERR_INVALID_CEQ_ID;
1877
1878         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1879
1880         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1881                 return I40IW_ERR_INVALID_PBLE_INDEX;
1882
1883         cq->cq_pa = info->cq_pa;
1884         cq->cq_uk.cq_base = info->cq_base;
1885         cq->shadow_area_pa = info->shadow_area_pa;
1886         cq->cq_uk.shadow_area = info->shadow_area;
1887         cq->shadow_read_threshold = info->shadow_read_threshold;
1888         cq->dev = info->dev;
1889         cq->ceq_id = info->ceq_id;
1890         cq->cq_uk.cq_size = info->num_elem;
1891         cq->cq_type = I40IW_CQ_TYPE_CQP;
1892         cq->ceqe_mask = info->ceqe_mask;
1893         I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
1894
1895         cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
1896         cq->ceq_id_valid = info->ceq_id_valid;
1897         cq->tph_en = info->tph_en;
1898         cq->tph_val = info->tph_val;
1899         cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
1900
1901         cq->pbl_list = info->pbl_list;
1902         cq->virtual_map = info->virtual_map;
1903         cq->pbl_chunk_size = info->pbl_chunk_size;
1904         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1905         cq->cq_uk.polarity = true;
1906
1907         /* following are only for iw cqs so initialize them to zero */
1908         cq->cq_uk.cqe_alloc_reg = NULL;
1909         info->dev->ccq = cq;
1910         return 0;
1911 }
1912
1913 /**
1914  * i40iw_sc_ccq_create_done - poll cqp for ccq create
1915  * @ccq: ccq sc struct
1916  */
1917 static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1918 {
1919         struct i40iw_sc_cqp *cqp;
1920
1921         cqp = ccq->dev->cqp;
1922         return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1923 }
1924
/**
 * i40iw_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overflow flag for ccq
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that creates the control cq.  When
 * @post_sq is set, also polls for the create completion and then
 * routes SD updates through the new ccq.
 */
static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
						  u64 scratch,
						  bool check_overflow,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_status_code ret_code;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	/* stash the ccq pointer (shifted right by one bit) in the wqe */
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	/* physical address vs. first pble index, depending on mapping */
	set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
	set_64bit_val(wqe, 48,
		      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56,
		      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_sc_ccq_create_done(ccq);
		if (ret_code)
			return ret_code;
	}
	/* with a ccq available, SD commands go through it */
	cqp->process_cqp_sds = i40iw_cqp_sds_cmd;

	return 0;
}
1984
/**
 * i40iw_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Since the ccq being destroyed cannot report its own completion,
 * completion is tracked by polling the cqp registers instead.
 */
static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	enum i40iw_status_code ret_code = 0;
	u32 tail, val, error;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	/* stash the ccq pointer (shifted right by one bit) in the wqe */
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* snapshot the sq tail before posting so register polling can
	 * detect the wqe's consumption
	 */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
	}

	/* from here on SD updates must bypass the destroyed ccq */
	cqp->process_cqp_sds = i40iw_update_sds_noccq;

	return ret_code;
}
2036
2037 /**
2038  * i40iw_sc_cq_init - initialize completion q
2039  * @cq: cq struct
2040  * @info: cq initialization info
2041  */
2042 static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
2043                                                struct i40iw_cq_init_info *info)
2044 {
2045         u32 __iomem *cqe_alloc_reg = NULL;
2046         enum i40iw_status_code ret_code;
2047         u32 pble_obj_cnt;
2048         u32 arm_offset;
2049
2050         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2051
2052         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
2053                 return I40IW_ERR_INVALID_PBLE_INDEX;
2054
2055         cq->cq_pa = info->cq_base_pa;
2056         cq->dev = info->dev;
2057         cq->ceq_id = info->ceq_id;
2058         arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
2059         if (i40iw_get_hw_addr(cq->dev))
2060                 cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
2061                                               arm_offset);
2062         info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
2063         ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
2064         if (ret_code)
2065                 return ret_code;
2066         cq->virtual_map = info->virtual_map;
2067         cq->pbl_chunk_size = info->pbl_chunk_size;
2068         cq->ceqe_mask = info->ceqe_mask;
2069         cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2070
2071         cq->shadow_area_pa = info->shadow_area_pa;
2072         cq->shadow_read_threshold = info->shadow_read_threshold;
2073
2074         cq->ceq_id_valid = info->ceq_id_valid;
2075         cq->tph_en = info->tph_en;
2076         cq->tph_val = info->tph_val;
2077
2078         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2079
2080         return 0;
2081 }
2082
/**
 * i40iw_sc_cq_create - create completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that creates @cq and optionally rings
 * the cqp doorbell.
 */
static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
						 u64 scratch,
						 bool check_overflow,
						 bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	/* validate ids before consuming a cqp sq wqe */
	if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
		return I40IW_ERR_INVALID_CQ_ID;

	if (cq->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	/* stash the cq pointer (shifted right by one bit) in the wqe */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe,
		      16,
		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));

	/* physical address vs. first pble index, depending on mapping */
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));

	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2143
/**
 * i40iw_sc_cq_destroy - destroy completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds the CQP work request that destroys @cq and optionally rings
 * the cqp doorbell.
 */
static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	/* stash the cq pointer (shifted right by one bit) in the wqe */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	/* first pble index is only meaningful for virtually mapped cqs */
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2187
/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 *
 * Selects new values from @info where the corresponding change flag
 * (cq_resize / ceq_change / check_overflow_change) is set, keeps the
 * cq's current values otherwise, commits the result to @cq and builds
 * the CQP modify request.
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* NOTE(review): cq->first_pm_pbl_idx is overwritten here before
	 * the "current" value is sampled below, so the non-resize branch
	 * reads info->first_pm_pbl_idx rather than the cq's previous
	 * value - confirm this is intended.
	 */
	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	/* pick new vs. current settings depending on what is changing */
	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;
	/* commit the effective values back to the cq */
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	/* stash the cq pointer (shifted right by one bit) in the wqe */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header carries the valid bit; written after the payload fields */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2282
/**
 * i40iw_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 *
 * Copies caller-provided dma addresses/buffers into @qp, initializes
 * the user-kernel (uk) portion, validates pble indexes for virtually
 * mapped queues and computes the encoded hw sq/rq sizes.
 */
static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
					       struct i40iw_qp_init_info *info)
{
	u32 __iomem *wqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u8 wqe_size;
	u32 offset;

	qp->dev = info->pd->dev;
	qp->vsi = info->vsi;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;

	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	/* wqe-alloc doorbell register differs between PF and VF */
	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
	if (i40iw_get_hw_addr(qp->pd->dev))
		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
					      offset);

	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
	info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;
	qp->virtual_map = info->virtual_map;

	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* when virtually mapped, sq_pa/rq_pa appear to carry pble
	 * indexes (they are range-checked against the pble count)
	 */
	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	qp->llp_stream_handle = (void *)(-1);
	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;

	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);

	/* rq wqe size depends on the user/kernel ABI version */
	switch (qp->pd->abi_ver) {
	case 4:
		ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
						       &wqe_size);
		if (ret_code)
			return ret_code;
		break;
	case 5: /* fallthrough until next ABI version */
	default:
		if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
			return I40IW_ERR_INVALID_FRAG_COUNT;
		wqe_size = I40IW_MAX_WQE_SIZE_RQ;
		break;
	}
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;

	return 0;
}
2364
2365 /**
2366  * i40iw_sc_qp_create - create qp
2367  * @qp: sc qp
2368  * @info: qp create info
2369  * @scratch: u64 saved to be used during cqp completion
2370  * @post_sq: flag for cqp db to ring
2371  */
2372 static enum i40iw_status_code i40iw_sc_qp_create(
2373                                 struct i40iw_sc_qp *qp,
2374                                 struct i40iw_create_qp_info *info,
2375                                 u64 scratch,
2376                                 bool post_sq)
2377 {
2378         struct i40iw_sc_cqp *cqp;
2379         u64 *wqe;
2380         u64 header;
2381
2382         if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2383             (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2384                 return I40IW_ERR_INVALID_QP_ID;
2385
2386         cqp = qp->pd->dev->cqp;
2387         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2388         if (!wqe)
2389                 return I40IW_ERR_RING_FULL;
2390
2391         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2392
2393         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2394
2395         header = qp->qp_uk.qp_id |
2396                  LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2397                  LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2398                  LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2399                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2400                  LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2401                  LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2402                  LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2403                  LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2404                  LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2405                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2406
2407         i40iw_insert_wqe_hdr(wqe, header);
2408         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2409                         wqe, I40IW_CQP_WQE_SIZE * 8);
2410
2411         if (post_sq)
2412                 i40iw_sc_cqp_post_sq(cqp);
2413         return 0;
2414 }
2415
/**
 * i40iw_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_modify(
				struct i40iw_sc_qp *qp,
				struct i40iw_modify_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
		/*
		 * Accumulate the termination action code.
		 * NOTE(review): dont_send_fin adds SEND_TERM_ONLY and
		 * dont_send_term adds SEND_FIN_ONLY - the cross-mapping
		 * looks inverted at first glance but depends on the
		 * I40IWQP_TERM_* encoding; confirm against the definitions
		 * before changing.
		 */
		if (info->dont_send_fin)
			term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
		/* a TERM length only applies when a TERM message is sent */
		if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
		    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
			term_len = info->termlen;
	}

	set_64bit_val(wqe,
		      8,
		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));

	/* host context and shadow area physical addresses */
	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
		 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written last */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2482
2483 /**
2484  * i40iw_sc_qp_destroy - cqp destroy qp
2485  * @qp: sc qp
2486  * @scratch: u64 saved to be used during cqp completion
2487  * @remove_hash_idx: flag if to remove hash idx
2488  * @ignore_mw_bnd: memory window bind flag
2489  * @post_sq: flag for cqp db to ring
2490  */
2491 static enum i40iw_status_code i40iw_sc_qp_destroy(
2492                                         struct i40iw_sc_qp *qp,
2493                                         u64 scratch,
2494                                         bool remove_hash_idx,
2495                                         bool ignore_mw_bnd,
2496                                         bool post_sq)
2497 {
2498         u64 *wqe;
2499         struct i40iw_sc_cqp *cqp;
2500         u64 header;
2501
2502         i40iw_qp_rem_qos(qp);
2503         cqp = qp->pd->dev->cqp;
2504         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2505         if (!wqe)
2506                 return I40IW_ERR_RING_FULL;
2507         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2508         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2509
2510         header = qp->qp_uk.qp_id |
2511                  LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2512                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2513                  LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2514                  LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2515                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2516
2517         i40iw_insert_wqe_hdr(wqe, header);
2518         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2519                         wqe, I40IW_CQP_WQE_SIZE * 8);
2520
2521         if (post_sq)
2522                 i40iw_sc_cqp_post_sq(cqp);
2523         return 0;
2524 }
2525
/**
 * i40iw_sc_qp_flush_wqes - flush qp's wqe
 * @qp: sc qp
 * @info: flush information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
				struct i40iw_sc_qp *qp,
				struct i40iw_qp_flush_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 temp = 0;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	bool flush_sq = false, flush_rq = false;

	/* each queue is flushed at most once; qp->flush_sq/flush_rq latch */
	if (info->rq && !qp->flush_rq)
		flush_rq = true;

	if (info->sq && !qp->flush_sq)
		flush_sq = true;

	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;
	/*
	 * Nothing new to flush: skip posting the WQE, except when the
	 * caller still needs an async event generated for an MPA CRC
	 * error, in which case fall through and post anyway.
	 */
	if (!flush_sq && !flush_rq) {
		if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
			return 0;
	}

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* optional user-supplied completion error codes for each queue */
	if (info->userflushcode) {
		if (flush_rq) {
			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
		}
		if (flush_sq) {
			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
		}
	}
	set_64bit_val(wqe, 16, temp);

	/* async event code/source, only when AE generation was requested */
	temp = (info->generate_ae) ?
		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;

	set_64bit_val(wqe, 8, temp);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written last */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2596
2597 /**
2598  * i40iw_sc_qp_upload_context - upload qp's context
2599  * @dev: sc device struct
2600  * @info: upload context info ptr for return
2601  * @scratch: u64 saved to be used during cqp completion
2602  * @post_sq: flag for cqp db to ring
2603  */
2604 static enum i40iw_status_code i40iw_sc_qp_upload_context(
2605                                         struct i40iw_sc_dev *dev,
2606                                         struct i40iw_upload_context_info *info,
2607                                         u64 scratch,
2608                                         bool post_sq)
2609 {
2610         u64 *wqe;
2611         struct i40iw_sc_cqp *cqp;
2612         u64 header;
2613
2614         cqp = dev->cqp;
2615         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2616         if (!wqe)
2617                 return I40IW_ERR_RING_FULL;
2618         set_64bit_val(wqe, 16, info->buf_pa);
2619
2620         header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2621                  LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2622                  LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2623                  LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2624                  LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2625                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2626
2627         i40iw_insert_wqe_hdr(wqe, header);
2628
2629         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2630                         wqe, I40IW_CQP_WQE_SIZE * 8);
2631
2632         if (post_sq)
2633                 i40iw_sc_cqp_post_sq(cqp);
2634         return 0;
2635 }
2636
2637 /**
2638  * i40iw_sc_qp_setctx - set qp's context
2639  * @qp: sc qp
2640  * @qp_ctx: context ptr
2641  * @info: ctx info
2642  */
2643 static enum i40iw_status_code i40iw_sc_qp_setctx(
2644                                 struct i40iw_sc_qp *qp,
2645                                 u64 *qp_ctx,
2646                                 struct i40iw_qp_host_ctx_info *info)
2647 {
2648         struct i40iwarp_offload_info *iw;
2649         struct i40iw_tcp_offload_info *tcp;
2650         struct i40iw_sc_vsi *vsi;
2651         struct i40iw_sc_dev *dev;
2652         u64 qw0, qw3, qw7 = 0;
2653
2654         iw = info->iwarp_info;
2655         tcp = info->tcp_info;
2656         vsi = qp->vsi;
2657         dev = qp->dev;
2658         if (info->add_to_qoslist) {
2659                 qp->user_pri = info->user_pri;
2660                 i40iw_qp_add_qos(qp);
2661                 i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
2662                             __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
2663         }
2664         qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2665               LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2666               LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2667               LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2668               LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2669               LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2670               LS_64(info->push_idx, I40IWQPC_PPIDX) |
2671               LS_64(info->push_mode_en, I40IWQPC_PMENA);
2672
2673         set_64bit_val(qp_ctx, 8, qp->sq_pa);
2674         set_64bit_val(qp_ctx, 16, qp->rq_pa);
2675
2676         qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2677               LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2678               LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2679
2680         set_64bit_val(qp_ctx,
2681                       128,
2682                       LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2683
2684         set_64bit_val(qp_ctx,
2685                       136,
2686                       LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2687                       LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2688
2689         set_64bit_val(qp_ctx,
2690                       168,
2691                       LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2692         set_64bit_val(qp_ctx,
2693                       176,
2694                       LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2695                       LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2696                       LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2697                       LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2698
2699         if (info->iwarp_info_valid) {
2700                 qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2701                        LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2702
2703                 qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2704                 set_64bit_val(qp_ctx,
2705                               144,
2706                               LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
2707                               LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
2708                 set_64bit_val(qp_ctx,
2709                               152,
2710                               LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2711
2712                 set_64bit_val(qp_ctx,
2713                               160,
2714                               LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2715                               LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2716                               LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2717                               LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2718                               LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2719                               LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2720                               LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2721                               LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2722                               LS_64((((vsi->stats_fcn_id_alloc) &&
2723                                       (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
2724                                     I40IWQPC_USESTATSINSTANCE) |
2725                               LS_64(1, I40IWQPC_IWARPMODE) |
2726                               LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2727                               LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2728                               LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2729                               LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2730                               LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2731         }
2732         if (info->tcp_info_valid) {
2733                 qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2734                        LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2735                        LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2736                        LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2737                        LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2738                        LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2739                        LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2740
2741                 qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2742                        LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2743                        LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2744                        LS_64(tcp->tos, I40IWQPC_TOS) |
2745                        LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2746                        LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2747
2748                 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2749                 set_64bit_val(qp_ctx,
2750                               32,
2751                               LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2752                               LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2753
2754                 set_64bit_val(qp_ctx,
2755                               40,
2756                               LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2757                               LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2758
2759                 set_64bit_val(qp_ctx,
2760                               48,
2761                               LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2762                                 LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2763                                 LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2764
2765                 qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2766                        LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2767                        LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2768                        LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2769                        LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2770                        LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2771                        LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2772
2773                 set_64bit_val(qp_ctx,
2774                               72,
2775                               LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2776                               LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2777                 set_64bit_val(qp_ctx,
2778                               80,
2779                               LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2780                               LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2781
2782                 set_64bit_val(qp_ctx,
2783                               88,
2784                               LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2785                               LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2786                 set_64bit_val(qp_ctx,
2787                               96,
2788                               LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2789                               LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2790                 set_64bit_val(qp_ctx,
2791                               104,
2792                               LS_64(tcp->srtt, I40IWQPC_SRTT) |
2793                               LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2794                 set_64bit_val(qp_ctx,
2795                               112,
2796                               LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2797                               LS_64(tcp->cwnd, I40IWQPC_CWND));
2798                 set_64bit_val(qp_ctx,
2799                               120,
2800                               LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2801                               LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2802                 set_64bit_val(qp_ctx,
2803                               128,
2804                               LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2805                               LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2806                 set_64bit_val(qp_ctx,
2807                               184,
2808                               LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
2809                               LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
2810                 set_64bit_val(qp_ctx,
2811                               192,
2812                               LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
2813                               LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
2814         }
2815
2816         set_64bit_val(qp_ctx, 0, qw0);
2817         set_64bit_val(qp_ctx, 24, qw3);
2818         set_64bit_val(qp_ctx, 56, qw7);
2819
2820         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE",
2821                         qp_ctx, I40IW_QP_CTX_SIZE);
2822         return 0;
2823 }
2824
2825 /**
2826  * i40iw_sc_alloc_stag - mr stag alloc
2827  * @dev: sc device struct
2828  * @info: stag info
2829  * @scratch: u64 saved to be used during cqp completion
2830  * @post_sq: flag for cqp db to ring
2831  */
2832 static enum i40iw_status_code i40iw_sc_alloc_stag(
2833                                 struct i40iw_sc_dev *dev,
2834                                 struct i40iw_allocate_stag_info *info,
2835                                 u64 scratch,
2836                                 bool post_sq)
2837 {
2838         u64 *wqe;
2839         struct i40iw_sc_cqp *cqp;
2840         u64 header;
2841         enum i40iw_page_size page_size;
2842
2843         page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2844         cqp = dev->cqp;
2845         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2846         if (!wqe)
2847                 return I40IW_ERR_RING_FULL;
2848         set_64bit_val(wqe,
2849                       8,
2850                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2851                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2852         set_64bit_val(wqe,
2853                       16,
2854                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2855         set_64bit_val(wqe,
2856                       40,
2857                       LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2858
2859         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2860                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2861                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2862                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2863                  LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2864                  LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2865                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2866                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2867                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2868
2869         i40iw_insert_wqe_hdr(wqe, header);
2870
2871         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2872                         wqe, I40IW_CQP_WQE_SIZE * 8);
2873
2874         if (post_sq)
2875                 i40iw_sc_cqp_post_sq(cqp);
2876         return 0;
2877 }
2878
2879 /**
2880  * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2881  * @dev: sc device struct
2882  * @info: mr info
2883  * @scratch: u64 saved to be used during cqp completion
2884  * @post_sq: flag for cqp db to ring
2885  */
2886 static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2887                                 struct i40iw_sc_dev *dev,
2888                                 struct i40iw_reg_ns_stag_info *info,
2889                                 u64 scratch,
2890                                 bool post_sq)
2891 {
2892         u64 *wqe;
2893         u64 temp;
2894         struct i40iw_sc_cqp *cqp;
2895         u64 header;
2896         u32 pble_obj_cnt;
2897         bool remote_access;
2898         u8 addr_type;
2899         enum i40iw_page_size page_size;
2900
2901         page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2902         if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2903                                    I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2904                 remote_access = true;
2905         else
2906                 remote_access = false;
2907
2908         pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2909
2910         if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2911                 return I40IW_ERR_INVALID_PBLE_INDEX;
2912
2913         cqp = dev->cqp;
2914         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2915         if (!wqe)
2916                 return I40IW_ERR_RING_FULL;
2917
2918         temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2919         set_64bit_val(wqe, 0, temp);
2920
2921         set_64bit_val(wqe,
2922                       8,
2923                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2924                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2925
2926         set_64bit_val(wqe,
2927                       16,
2928                       LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2929                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2930         if (!info->chunk_size) {
2931                 set_64bit_val(wqe, 32, info->reg_addr_pa);
2932                 set_64bit_val(wqe, 48, 0);
2933         } else {
2934                 set_64bit_val(wqe, 32, 0);
2935                 set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2936         }
2937         set_64bit_val(wqe, 40, info->hmc_fcn_index);
2938         set_64bit_val(wqe, 56, 0);
2939
2940         addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2941         header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2942                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2943                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2944                  LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2945                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2946                  LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2947                  LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2948                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2949                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2950                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2951
2952         i40iw_insert_wqe_hdr(wqe, header);
2953
2954         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
2955                         wqe, I40IW_CQP_WQE_SIZE * 8);
2956
2957         if (post_sq)
2958                 i40iw_sc_cqp_post_sq(cqp);
2959         return 0;
2960 }
2961
/**
 * i40iw_sc_mr_reg_shared - registered shared memory region
 * @dev: sc device struct
 * @info: info for shared memory registration
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_shared(
					struct i40iw_sc_dev *dev,
					struct i40iw_register_shared_stag *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 temp, va64, fbo, header;
	u32 va32;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* first byte offset = VA's offset within a 4K page (low 12 bits) */
	va64 = (uintptr_t)(info->va);
	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
	fbo = (u64)(va32 & (4096 - 1));

	/* qword 0 carries the VA for VA-based stags, the fbo otherwise */
	set_64bit_val(wqe,
		      0,
		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	/* new stag key/index plus the parent stag it shares pages with */
	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
	set_64bit_val(wqe, 16, temp);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written last */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
3024
3025 /**
3026  * i40iw_sc_dealloc_stag - deallocate stag
3027  * @dev: sc device struct
3028  * @info: dealloc stag info
3029  * @scratch: u64 saved to be used during cqp completion
3030  * @post_sq: flag for cqp db to ring
3031  */
3032 static enum i40iw_status_code i40iw_sc_dealloc_stag(
3033                                         struct i40iw_sc_dev *dev,
3034                                         struct i40iw_dealloc_stag_info *info,
3035                                         u64 scratch,
3036                                         bool post_sq)
3037 {
3038         u64 header;
3039         u64 *wqe;
3040         struct i40iw_sc_cqp *cqp;
3041
3042         cqp = dev->cqp;
3043         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3044         if (!wqe)
3045                 return I40IW_ERR_RING_FULL;
3046         set_64bit_val(wqe,
3047                       8,
3048                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3049         set_64bit_val(wqe,
3050                       16,
3051                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3052
3053         header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3054                  LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
3055                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3056
3057         i40iw_insert_wqe_hdr(wqe, header);
3058
3059         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
3060                         wqe, I40IW_CQP_WQE_SIZE * 8);
3061
3062         if (post_sq)
3063                 i40iw_sc_cqp_post_sq(cqp);
3064         return 0;
3065 }
3066
3067 /**
3068  * i40iw_sc_query_stag - query hardware for stag
3069  * @dev: sc device struct
3070  * @scratch: u64 saved to be used during cqp completion
3071  * @stag_index: stag index for query
3072  * @post_sq: flag for cqp db to ring
3073  */
3074 static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
3075                                                   u64 scratch,
3076                                                   u32 stag_index,
3077                                                   bool post_sq)
3078 {
3079         u64 header;
3080         u64 *wqe;
3081         struct i40iw_sc_cqp *cqp;
3082
3083         cqp = dev->cqp;
3084         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3085         if (!wqe)
3086                 return I40IW_ERR_RING_FULL;
3087         set_64bit_val(wqe,
3088                       16,
3089                       LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
3090
3091         header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3092                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3093
3094         i40iw_insert_wqe_hdr(wqe, header);
3095
3096         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3097                         wqe, I40IW_CQP_WQE_SIZE * 8);
3098
3099         if (post_sq)
3100                 i40iw_sc_cqp_post_sq(cqp);
3101         return 0;
3102 }
3103
3104 /**
3105  * i40iw_sc_mw_alloc - mw allocate
3106  * @dev: sc device struct
3107  * @scratch: u64 saved to be used during cqp completion
3108  * @mw_stag_index:stag index
3109  * @pd_id: pd is for this mw
3110  * @post_sq: flag for cqp db to ring
3111  */
3112 static enum i40iw_status_code i40iw_sc_mw_alloc(
3113                                         struct i40iw_sc_dev *dev,
3114                                         u64 scratch,
3115                                         u32 mw_stag_index,
3116                                         u16 pd_id,
3117                                         bool post_sq)
3118 {
3119         u64 header;
3120         struct i40iw_sc_cqp *cqp;
3121         u64 *wqe;
3122
3123         cqp = dev->cqp;
3124         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3125         if (!wqe)
3126                 return I40IW_ERR_RING_FULL;
3127         set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3128         set_64bit_val(wqe,
3129                       16,
3130                       LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3131
3132         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3133                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3134
3135         i40iw_insert_wqe_hdr(wqe, header);
3136
3137         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3138                         wqe, I40IW_CQP_WQE_SIZE * 8);
3139
3140         if (post_sq)
3141                 i40iw_sc_cqp_post_sq(cqp);
3142         return 0;
3143 }
3144
/**
 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
 * @qp: sc qp struct
 * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 */
enum i40iw_status_code i40iw_sc_mr_fast_register(
				struct i40iw_sc_qp *qp,
				struct i40iw_fast_reg_stag_info *info,
				bool post_sq)
{
	u64 temp, header;
	u64 *wqe;
	u32 wqe_idx;
	enum i40iw_page_size page_size;

	/* only 2M (0x200000) and 4K host page sizes are encoded here;
	 * anything else falls back to 4K
	 */
	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
					 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
		    __func__, info->wr_id, wqe_idx,
		    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
	/* word 0: the VA for VA-based stags, otherwise the fbo */
	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	/* word 1: pbl physical address plus the high bits (>> 16) of the
	 * first pble index; the low bits go in word 2 below
	 */
	temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
	set_64bit_val(wqe,
		      8,
		      LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
		      LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));

	/* word 2: total region length plus low bits of first pble index */
	set_64bit_val(wqe,
		      16,
		      info->total_len |
		      LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));

	header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
		 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
		 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
		 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
		 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
		 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	/* header with the valid bit is written after the rest of the wqe */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);

	if (post_sq)
		i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}
3205
3206 /**
3207  * i40iw_sc_send_lsmm - send last streaming mode message
3208  * @qp: sc qp struct
3209  * @lsmm_buf: buffer with lsmm message
3210  * @size: size of lsmm buffer
3211  * @stag: stag of lsmm buffer
3212  */
3213 static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3214                                void *lsmm_buf,
3215                                u32 size,
3216                                i40iw_stag stag)
3217 {
3218         u64 *wqe;
3219         u64 header;
3220         struct i40iw_qp_uk *qp_uk;
3221
3222         qp_uk = &qp->qp_uk;
3223         wqe = qp_uk->sq_base->elem;
3224
3225         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3226
3227         set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3228
3229         set_64bit_val(wqe, 16, 0);
3230
3231         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3232                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3233                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3234                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3235
3236         i40iw_insert_wqe_hdr(wqe, header);
3237
3238         i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3239                         wqe, I40IW_QP_WQE_MIN_SIZE);
3240 }
3241
3242 /**
3243  * i40iw_sc_send_lsmm_nostag - for privilege qp
3244  * @qp: sc qp struct
3245  * @lsmm_buf: buffer with lsmm message
3246  * @size: size of lsmm buffer
3247  */
3248 static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3249                                       void *lsmm_buf,
3250                                       u32 size)
3251 {
3252         u64 *wqe;
3253         u64 header;
3254         struct i40iw_qp_uk *qp_uk;
3255
3256         qp_uk = &qp->qp_uk;
3257         wqe = qp_uk->sq_base->elem;
3258
3259         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3260
3261         set_64bit_val(wqe, 8, size);
3262
3263         set_64bit_val(wqe, 16, 0);
3264
3265         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3266                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3267                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3268                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3269
3270         i40iw_insert_wqe_hdr(wqe, header);
3271
3272         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3273                         wqe, I40IW_QP_WQE_MIN_SIZE);
3274 }
3275
3276 /**
3277  * i40iw_sc_send_rtt - send last read0 or write0
3278  * @qp: sc qp struct
3279  * @read: Do read0 or write0
3280  */
3281 static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3282 {
3283         u64 *wqe;
3284         u64 header;
3285         struct i40iw_qp_uk *qp_uk;
3286
3287         qp_uk = &qp->qp_uk;
3288         wqe = qp_uk->sq_base->elem;
3289
3290         set_64bit_val(wqe, 0, 0);
3291         set_64bit_val(wqe, 8, 0);
3292         set_64bit_val(wqe, 16, 0);
3293         if (read) {
3294                 header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3295                          LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3296                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3297                 set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3298         } else {
3299                 header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3300                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3301         }
3302
3303         i40iw_insert_wqe_hdr(wqe, header);
3304
3305         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3306                         wqe, I40IW_QP_WQE_MIN_SIZE);
3307 }
3308
3309 /**
3310  * i40iw_sc_post_wqe0 - send wqe with opcode
3311  * @qp: sc qp struct
3312  * @opcode: opcode to use for wqe0
3313  */
3314 static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3315 {
3316         u64 *wqe;
3317         u64 header;
3318         struct i40iw_qp_uk *qp_uk;
3319
3320         qp_uk = &qp->qp_uk;
3321         wqe = qp_uk->sq_base->elem;
3322
3323         if (!wqe)
3324                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3325         switch (opcode) {
3326         case I40IWQP_OP_NOP:
3327                 set_64bit_val(wqe, 0, 0);
3328                 set_64bit_val(wqe, 8, 0);
3329                 set_64bit_val(wqe, 16, 0);
3330                 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3331                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3332
3333                 i40iw_insert_wqe_hdr(wqe, header);
3334                 break;
3335         case I40IWQP_OP_RDMA_SEND:
3336                 set_64bit_val(wqe, 0, 0);
3337                 set_64bit_val(wqe, 8, 0);
3338                 set_64bit_val(wqe, 16, 0);
3339                 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3340                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3341                          LS_64(1, I40IWQPSQ_STREAMMODE) |
3342                          LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3343
3344                 i40iw_insert_wqe_hdr(wqe, header);
3345                 break;
3346         default:
3347                 i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3348                             __func__);
3349                 break;
3350         }
3351         return 0;
3352 }
3353
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev: ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	/* id must be this function's own id or fall in the VF FPM id range */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		/* own function: use the device's pre-allocated query buffer */
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		if (!vf_dev->fpm_query_buf) {
			/* lazily allocate the per-VF query buffer on first use
			 * and cache it on both the device and the vf_dev
			 */
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/**
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		/* VF path: query is issued through the virtchnl cqp command */
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		/* VF path: commit the values now and size the VF's sd table */
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	return ret_code;
}
3456
/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev: ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	/* id must be this function's own id or fall in the VF FPM id range */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		/* PF acting for a VF: the PF ccq already exists, so the
		 * completion must be polled on the cq, not the registers
		 */
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf, one 8-byte slot per object type */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			(u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
							 hmc_info->hmc_obj,
							 &hmc_info->sd_table.sd_cnt);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
3521
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	/* at most 3 sd entries fit inline in the wqe; the remainder are
	 * handed to hw through the cqp's sdbuf dma buffer
	 */
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		/* each sd entry occupies 16 bytes (cmd + data), hence << 4 */
		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
		data = sdbuf->pa;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	/* fill the inline entries; each case deliberately falls through
	 * to also fill the lower-numbered entries
	 */
	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
					LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
					LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	/* header with the valid bit is written after the rest of the wqe */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}
3591
3592 /**
3593  * i40iw_update_pe_sds - cqp wqe for sd
3594  * @dev: ptr to i40iw_dev struct
3595  * @info: sd info for sd's
3596  * @scratch: u64 saved to be used during cqp completion
3597  */
3598 static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3599                                                   struct i40iw_update_sds_info *info,
3600                                                   u64 scratch)
3601 {
3602         struct i40iw_sc_cqp *cqp = dev->cqp;
3603         enum i40iw_status_code ret_code;
3604
3605         ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3606         if (!ret_code)
3607                 i40iw_sc_cqp_post_sq(cqp);
3608
3609         return ret_code;
3610 }
3611
3612 /**
3613  * i40iw_update_sds_noccq - update sd before ccq created
3614  * @dev: sc device struct
3615  * @info: sd info for sd's
3616  */
3617 enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3618                                               struct i40iw_update_sds_info *info)
3619 {
3620         u32 error, val, tail;
3621         struct i40iw_sc_cqp *cqp = dev->cqp;
3622         enum i40iw_status_code ret_code;
3623
3624         ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3625         if (ret_code)
3626                 return ret_code;
3627         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3628         if (error)
3629                 return I40IW_ERR_CQP_COMPL_ERROR;
3630
3631         i40iw_sc_cqp_post_sq(cqp);
3632         ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3633
3634         return ret_code;
3635 }
3636
3637 /**
3638  * i40iw_sc_suspend_qp - suspend qp for param change
3639  * @cqp: struct for cqp hw
3640  * @qp: sc qp struct
3641  * @scratch: u64 saved to be used during cqp completion
3642  */
3643 enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3644                                            struct i40iw_sc_qp *qp,
3645                                            u64 scratch)
3646 {
3647         u64 header;
3648         u64 *wqe;
3649
3650         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3651         if (!wqe)
3652                 return I40IW_ERR_RING_FULL;
3653         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3654                  LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3655                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3656
3657         i40iw_insert_wqe_hdr(wqe, header);
3658
3659         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3660                         wqe, I40IW_CQP_WQE_SIZE * 8);
3661
3662         i40iw_sc_cqp_post_sq(cqp);
3663         return 0;
3664 }
3665
3666 /**
3667  * i40iw_sc_resume_qp - resume qp after suspend
3668  * @cqp: struct for cqp hw
3669  * @qp: sc qp struct
3670  * @scratch: u64 saved to be used during cqp completion
3671  */
3672 enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3673                                           struct i40iw_sc_qp *qp,
3674                                           u64 scratch)
3675 {
3676         u64 header;
3677         u64 *wqe;
3678
3679         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3680         if (!wqe)
3681                 return I40IW_ERR_RING_FULL;
3682         set_64bit_val(wqe,
3683                       16,
3684                         LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3685
3686         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3687                  LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3688                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3689
3690         i40iw_insert_wqe_hdr(wqe, header);
3691
3692         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3693                         wqe, I40IW_CQP_WQE_SIZE * 8);
3694
3695         i40iw_sc_cqp_post_sq(cqp);
3696         return 0;
3697 }
3698
3699 /**
3700  * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3701  * @cqp: struct for cqp hw
3702  * @scratch: u64 saved to be used during cqp completion
3703  * @hmc_fn_id: hmc function id
3704  * @post_sq: flag for cqp db to ring
3705  * @poll_registers: flag to poll register for cqp completion
3706  */
3707 enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3708                                         struct i40iw_sc_cqp *cqp,
3709                                         u64 scratch,
3710                                         u8 hmc_fn_id,
3711                                         bool post_sq,
3712                                         bool poll_registers)
3713 {
3714         u64 header;
3715         u64 *wqe;
3716         u32 tail, val, error;
3717         enum i40iw_status_code ret_code = 0;
3718
3719         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3720         if (!wqe)
3721                 return I40IW_ERR_RING_FULL;
3722         set_64bit_val(wqe,
3723                       16,
3724                       LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3725
3726         header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3727                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3728
3729         i40iw_insert_wqe_hdr(wqe, header);
3730
3731         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3732                         wqe, I40IW_CQP_WQE_SIZE * 8);
3733         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3734         if (error) {
3735                 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3736                 return ret_code;
3737         }
3738         if (post_sq) {
3739                 i40iw_sc_cqp_post_sq(cqp);
3740                 if (poll_registers)
3741                         /* check for cqp sq tail update */
3742                         ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3743                 else
3744                         ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3745                                                                  I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3746                                                                  NULL);
3747         }
3748
3749         return ret_code;
3750 }
3751
3752 /**
3753  * i40iw_ring_full - check if cqp ring is full
3754  * @cqp: struct for cqp hw
3755  */
3756 static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3757 {
3758         return I40IW_RING_FULL_ERR(cqp->sq_ring);
3759 }
3760
3761 /**
3762  * i40iw_est_sd - returns approximate number of SDs for HMC
3763  * @dev: sc device struct
3764  * @hmc_info: hmc structure, size and count for HMC objects
3765  */
3766 static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3767 {
3768         int i;
3769         u64 size = 0;
3770         u64 sd;
3771
3772         for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3773                 size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3774
3775         if (dev->is_pf)
3776                 size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3777
3778         if (size & 0x1FFFFF)
3779                 sd = (size >> 21) + 1; /* add 1 for remainder */
3780         else
3781                 sd = size >> 21;
3782
3783         if (!dev->is_pf) {
3784                 /* 2MB alignment for VF PBLE HMC */
3785                 size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3786                 if (size & 0x1FFFFF)
3787                         sd += (size >> 21) + 1; /* add 1 for remainder */
3788                 else
3789                         sd += size >> 21;
3790         }
3791
3792         return sd;
3793 }
3794
3795 /**
3796  * i40iw_config_fpm_values - configure HMC objects
3797  * @dev: sc device struct
3798  * @qp_count: desired qp count
3799  */
3800 enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
3801 {
3802         struct i40iw_virt_mem virt_mem;
3803         u32 i, mem_size;
3804         u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
3805         u32 powerof2;
3806         u64 sd_needed;
3807         u32 loop_count = 0;
3808
3809         struct i40iw_hmc_info *hmc_info;
3810         struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
3811         enum i40iw_status_code ret_code = 0;
3812
3813         hmc_info = dev->hmc_info;
3814         hmc_fpm_misc = &dev->hmc_fpm_misc;
3815
3816         ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
3817         if (ret_code) {
3818                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3819                             "i40iw_sc_init_iw_hmc returned error_code = %d\n",
3820                             ret_code);
3821                 return ret_code;
3822         }
3823
3824         for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
3825                 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
3826         sd_needed = i40iw_est_sd(dev, hmc_info);
3827         i40iw_debug(dev, I40IW_DEBUG_HMC,
3828                     "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
3829                     __func__, sd_needed, hmc_info->first_sd_index);
3830         i40iw_debug(dev, I40IW_DEBUG_HMC,
3831                     "%s: sd count %d where max sd is %d\n",
3832                     __func__, hmc_info->sd_table.sd_cnt,
3833                     hmc_fpm_misc->max_sds);
3834
3835         qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
3836         qpwantedoriginal = qpwanted;
3837         mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
3838         pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
3839
3840         i40iw_debug(dev, I40IW_DEBUG_HMC,
3841                     "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
3842                     qp_count, hmc_fpm_misc->max_sds,
3843                     hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
3844                     hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
3845                     hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
3846                     hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
3847
3848         do {
3849                 ++loop_count;
3850                 hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
3851                 hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
3852                         min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
3853                 hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
3854                 hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
3855                                         qpwanted * hmc_fpm_misc->ht_multiplier;
3856                 hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
3857                         hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
3858                 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
3859                 hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
3860
3861                 hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
3862                 hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
3863                 hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
3864                         hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
3865                 hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
3866                         hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
3867                 hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
3868                         ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
3869                 hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
3870                 hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
3871                 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
3872
3873                 /* How much memory is needed for all the objects. */
3874                 sd_needed = i40iw_est_sd(dev, hmc_info);
3875                 if ((loop_count > 1000) ||
3876                     ((!(loop_count % 10)) &&
3877                     (qpwanted > qpwantedoriginal * 2 / 3))) {
3878                         if (qpwanted > FPM_MULTIPLIER) {
3879                                 qpwanted -= FPM_MULTIPLIER;
3880                                 powerof2 = 1;
3881                                 while (powerof2 < qpwanted)
3882                                         powerof2 *= 2;
3883                                 powerof2 /= 2;
3884                                 qpwanted = powerof2;
3885                         } else {
3886                                 qpwanted /= 2;
3887                         }
3888                 }
3889                 if (mrwanted > FPM_MULTIPLIER * 10)
3890                         mrwanted -= FPM_MULTIPLIER * 10;
3891                 if (pblewanted > FPM_MULTIPLIER * 1000)
3892                         pblewanted -= FPM_MULTIPLIER * 1000;
3893         } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
3894
3895         sd_needed = i40iw_est_sd(dev, hmc_info);
3896
3897         i40iw_debug(dev, I40IW_DEBUG_HMC,
3898                     "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
3899                     loop_count, sd_needed,
3900                     hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
3901                     hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
3902                     hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
3903                     hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
3904
3905         ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
3906         if (ret_code) {
3907                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3908                             "configure_iw_fpm returned error_code[x%08X]\n",
3909                             i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
3910                 return ret_code;
3911         }
3912
3913         mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3914                    (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
3915         ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3916         if (ret_code) {
3917                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3918                             "%s: failed to allocate memory for sd_entry buffer\n",
3919                             __func__);
3920                 return ret_code;
3921         }
3922         hmc_info->sd_table.sd_entry = virt_mem.va;
3923
3924         return ret_code;
3925 }
3926
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 *
 * Dispatches one deferred CQP command to the matching i40iw_sc_* /
 * i40iw_* worker based on @pcmdinfo->cqp_cmd.  Both callers in this
 * file invoke it under dev->cqp_lock after checking that the CQP SQ
 * ring has room.
 *
 * Return: 0 on success, I40IW_NOT_SUPPORTED for an unknown command,
 * or the status returned by the underlying worker.
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
                                                 struct cqp_commands_info *pcmdinfo)
{
        enum i40iw_status_code status;
        struct i40iw_dma_mem values_mem;        /* scratch descriptor for FPM query/commit */

        /* Per-command counter, indexed by command id. */
        dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
        switch (pcmdinfo->cqp_cmd) {
        case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
                status = i40iw_sc_del_local_mac_ipaddr_entry(
                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
                                pcmdinfo->post_sq);
                break;
        case OP_CEQ_DESTROY:
                status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
                                              pcmdinfo->in.u.ceq_destroy.scratch,
                                              pcmdinfo->post_sq);
                break;
        case OP_AEQ_DESTROY:
                status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
                                              pcmdinfo->in.u.aeq_destroy.scratch,
                                              pcmdinfo->post_sq);

                break;
        case OP_DELETE_ARP_CACHE_ENTRY:
                status = i40iw_sc_del_arp_cache_entry(
                                pcmdinfo->in.u.del_arp_cache_entry.cqp,
                                pcmdinfo->in.u.del_arp_cache_entry.scratch,
                                pcmdinfo->in.u.del_arp_cache_entry.arp_index,
                                pcmdinfo->post_sq);
                break;
        case OP_MANAGE_APBVT_ENTRY:
                status = i40iw_sc_manage_apbvt_entry(
                                pcmdinfo->in.u.manage_apbvt_entry.cqp,
                                &pcmdinfo->in.u.manage_apbvt_entry.info,
                                pcmdinfo->in.u.manage_apbvt_entry.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_CEQ_CREATE:
                status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
                                             pcmdinfo->in.u.ceq_create.scratch,
                                             pcmdinfo->post_sq);
                break;
        case OP_AEQ_CREATE:
                status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
                                             pcmdinfo->in.u.aeq_create.scratch,
                                             pcmdinfo->post_sq);
                break;
        case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
                status = i40iw_sc_alloc_local_mac_ipaddr_entry(
                                pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
                                pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
                status = i40iw_sc_add_local_mac_ipaddr_entry(
                                pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
                                &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
                                pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_MANAGE_QHASH_TABLE_ENTRY:
                status = i40iw_sc_manage_qhash_table_entry(
                                pcmdinfo->in.u.manage_qhash_table_entry.cqp,
                                &pcmdinfo->in.u.manage_qhash_table_entry.info,
                                pcmdinfo->in.u.manage_qhash_table_entry.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_QP_MODIFY:
                status = i40iw_sc_qp_modify(
                                pcmdinfo->in.u.qp_modify.qp,
                                &pcmdinfo->in.u.qp_modify.info,
                                pcmdinfo->in.u.qp_modify.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_QP_UPLOAD_CONTEXT:
                status = i40iw_sc_qp_upload_context(
                                pcmdinfo->in.u.qp_upload_context.dev,
                                &pcmdinfo->in.u.qp_upload_context.info,
                                pcmdinfo->in.u.qp_upload_context.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_CQ_CREATE:
                status = i40iw_sc_cq_create(
                                pcmdinfo->in.u.cq_create.cq,
                                pcmdinfo->in.u.cq_create.scratch,
                                pcmdinfo->in.u.cq_create.check_overflow,
                                pcmdinfo->post_sq);
                break;
        case OP_CQ_DESTROY:
                status = i40iw_sc_cq_destroy(
                                pcmdinfo->in.u.cq_destroy.cq,
                                pcmdinfo->in.u.cq_destroy.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_QP_CREATE:
                status = i40iw_sc_qp_create(
                                pcmdinfo->in.u.qp_create.qp,
                                &pcmdinfo->in.u.qp_create.info,
                                pcmdinfo->in.u.qp_create.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_QP_DESTROY:
                status = i40iw_sc_qp_destroy(
                                pcmdinfo->in.u.qp_destroy.qp,
                                pcmdinfo->in.u.qp_destroy.scratch,
                                pcmdinfo->in.u.qp_destroy.remove_hash_idx,
                                pcmdinfo->in.u.qp_destroy.
                                ignore_mw_bnd,
                                pcmdinfo->post_sq);

                break;
        case OP_ALLOC_STAG:
                status = i40iw_sc_alloc_stag(
                                pcmdinfo->in.u.alloc_stag.dev,
                                &pcmdinfo->in.u.alloc_stag.info,
                                pcmdinfo->in.u.alloc_stag.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_MR_REG_NON_SHARED:
                status = i40iw_sc_mr_reg_non_shared(
                                pcmdinfo->in.u.mr_reg_non_shared.dev,
                                &pcmdinfo->in.u.mr_reg_non_shared.info,
                                pcmdinfo->in.u.mr_reg_non_shared.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_DEALLOC_STAG:
                status = i40iw_sc_dealloc_stag(
                                pcmdinfo->in.u.dealloc_stag.dev,
                                &pcmdinfo->in.u.dealloc_stag.info,
                                pcmdinfo->in.u.dealloc_stag.scratch,
                                pcmdinfo->post_sq);

                break;
        case OP_MW_ALLOC:
                status = i40iw_sc_mw_alloc(
                                pcmdinfo->in.u.mw_alloc.dev,
                                pcmdinfo->in.u.mw_alloc.scratch,
                                pcmdinfo->in.u.mw_alloc.mw_stag_index,
                                pcmdinfo->in.u.mw_alloc.pd_id,
                                pcmdinfo->post_sq);

                break;
        case OP_QP_FLUSH_WQES:
                status = i40iw_sc_qp_flush_wqes(
                                pcmdinfo->in.u.qp_flush_wqes.qp,
                                &pcmdinfo->in.u.qp_flush_wqes.info,
                                pcmdinfo->in.u.qp_flush_wqes.
                                scratch, pcmdinfo->post_sq);
                break;
        case OP_ADD_ARP_CACHE_ENTRY:
                status = i40iw_sc_add_arp_cache_entry(
                                pcmdinfo->in.u.add_arp_cache_entry.cqp,
                                &pcmdinfo->in.u.add_arp_cache_entry.info,
                                pcmdinfo->in.u.add_arp_cache_entry.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_MANAGE_PUSH_PAGE:
                status = i40iw_sc_manage_push_page(
                                pcmdinfo->in.u.manage_push_page.cqp,
                                &pcmdinfo->in.u.manage_push_page.info,
                                pcmdinfo->in.u.manage_push_page.scratch,
                                pcmdinfo->post_sq);
                break;
        case OP_UPDATE_PE_SDS:
                /* case I40IW_CQP_OP_UPDATE_PE_SDS */
                status = i40iw_update_pe_sds(
                                pcmdinfo->in.u.update_pe_sds.dev,
                                &pcmdinfo->in.u.update_pe_sds.info,
                                pcmdinfo->in.u.update_pe_sds.
                                scratch);

                break;
        case OP_MANAGE_HMC_PM_FUNC_TABLE:
                status = i40iw_sc_manage_hmc_pm_func_table(
                                pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
                                pcmdinfo->in.u.manage_hmc_pm.scratch,
                                (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
                                pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
                                true);
                break;
        case OP_SUSPEND:
                status = i40iw_sc_suspend_qp(
                                pcmdinfo->in.u.suspend_resume.cqp,
                                pcmdinfo->in.u.suspend_resume.qp,
                                pcmdinfo->in.u.suspend_resume.scratch);
                break;
        case OP_RESUME:
                status = i40iw_sc_resume_qp(
                                pcmdinfo->in.u.suspend_resume.cqp,
                                pcmdinfo->in.u.suspend_resume.qp,
                                pcmdinfo->in.u.suspend_resume.scratch);
                break;
        case OP_MANAGE_VF_PBLE_BP:
                status = i40iw_manage_vf_pble_bp(
                                pcmdinfo->in.u.manage_vf_pble_bp.cqp,
                                &pcmdinfo->in.u.manage_vf_pble_bp.info,
                                pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
                break;
        case OP_QUERY_FPM_VALUES:
                /* Rebuild the DMA descriptor from the stored pa/va pair. */
                values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
                values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
                status = i40iw_sc_query_fpm_values(
                                pcmdinfo->in.u.query_fpm_values.cqp,
                                pcmdinfo->in.u.query_fpm_values.scratch,
                                pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
                                &values_mem, true, I40IW_CQP_WAIT_EVENT);
                break;
        case OP_COMMIT_FPM_VALUES:
                /* Rebuild the DMA descriptor from the stored pa/va pair. */
                values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
                values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
                status = i40iw_sc_commit_fpm_values(
                                pcmdinfo->in.u.commit_fpm_values.cqp,
                                pcmdinfo->in.u.commit_fpm_values.scratch,
                                pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
                                &values_mem,
                                true,
                                I40IW_CQP_WAIT_EVENT);
                break;
        default:
                status = I40IW_NOT_SUPPORTED;
                break;
        }

        return status;
}
4166
4167 /**
4168  * i40iw_process_cqp_cmd - process all cqp commands
4169  * @dev: sc device struct
4170  * @pcmdinfo: cqp command info
4171  */
4172 enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4173                                              struct cqp_commands_info *pcmdinfo)
4174 {
4175         enum i40iw_status_code status = 0;
4176         unsigned long flags;
4177
4178         spin_lock_irqsave(&dev->cqp_lock, flags);
4179         if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4180                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4181         else
4182                 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4183         spin_unlock_irqrestore(&dev->cqp_lock, flags);
4184         return status;
4185 }
4186
4187 /**
4188  * i40iw_process_bh - called from tasklet for cqp list
4189  * @dev: sc device struct
4190  */
4191 enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4192 {
4193         enum i40iw_status_code status = 0;
4194         struct cqp_commands_info *pcmdinfo;
4195         unsigned long flags;
4196
4197         spin_lock_irqsave(&dev->cqp_lock, flags);
4198         while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4199                 pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4200
4201                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4202                 if (status)
4203                         break;
4204         }
4205         spin_unlock_irqrestore(&dev->cqp_lock, flags);
4206         return status;
4207 }
4208
4209 /**
4210  * i40iw_iwarp_opcode - determine if incoming is rdma layer
4211  * @info: aeq info for the packet
4212  * @pkt: packet for error
4213  */
4214 static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4215 {
4216         __be16 *mpa;
4217         u32 opcode = 0xffffffff;
4218
4219         if (info->q2_data_written) {
4220                 mpa = (__be16 *)pkt;
4221                 opcode = ntohs(mpa[1]) & 0xf;
4222         }
4223         return opcode;
4224 }
4225
4226 /**
4227  * i40iw_locate_mpa - return pointer to mpa in the pkt
4228  * @pkt: packet with data
4229  */
4230 static u8 *i40iw_locate_mpa(u8 *pkt)
4231 {
4232         /* skip over ethernet header */
4233         pkt += I40IW_MAC_HLEN;
4234
4235         /* Skip over IP and TCP headers */
4236         pkt += 4 * (pkt[0] & 0x0f);
4237         pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4238         return pkt;
4239 }
4240
4241 /**
4242  * i40iw_setup_termhdr - termhdr for terminate pkt
4243  * @qp: sc qp ptr for pkt
4244  * @hdr: term hdr
4245  * @opcode: flush opcode for termhdr
4246  * @layer_etype: error layer + error type
4247  * @err: error cod ein the header
4248  */
4249 static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4250                                 struct i40iw_terminate_hdr *hdr,
4251                                 enum i40iw_flush_opcode opcode,
4252                                 u8 layer_etype,
4253                                 u8 err)
4254 {
4255         qp->flush_code = opcode;
4256         hdr->layer_etype = layer_etype;
4257         hdr->error_code = err;
4258 }
4259
/**
 * i40iw_bld_terminate_hdr - build terminate message header
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Builds the terminate header in the QP's Q2 buffer, mapping the AE id
 * to the corresponding flush opcode and DDP/RDMAP layer+error codes.
 * When the offending packet is available (Q2 data written), its DDP and
 * RDMA headers are appended after the terminate header.
 *
 * Return: total terminate message length (header plus copied bytes).
 */
static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
                                   struct i40iw_aeqe_info *info)
{
        u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;     /* offending frame, if any */
        u16 ddp_seg_len;
        int copy_len = 0;       /* bytes of the bad frame to append */
        u8 is_tagged = 0;       /* set when DDP tagged-buffer bit seen */
        u32 opcode;
        struct i40iw_terminate_hdr *termhdr;

        /* Terminate header occupies the start of Q2, before the bad frame. */
        termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
        memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);

        if (info->q2_data_written) {
                /* Use data from offending packet to fill in ddp & rdma hdrs */
                pkt = i40iw_locate_mpa(pkt);
                ddp_seg_len = ntohs(*(__be16 *)pkt);
                if (ddp_seg_len) {
                        copy_len = 2;   /* always copy the MPA length field */
                        termhdr->hdrct = DDP_LEN_FLAG;
                        if (pkt[2] & 0x80) {
                                /* DDP tagged-buffer bit set */
                                is_tagged = 1;
                                if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
                                        copy_len += TERM_DDP_LEN_TAGGED;
                                        termhdr->hdrct |= DDP_HDR_FLAG;
                                }
                        } else {
                                if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
                                        copy_len += TERM_DDP_LEN_UNTAGGED;
                                        termhdr->hdrct |= DDP_HDR_FLAG;
                                }

                                /* Append RDMA header too for a read request. */
                                if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
                                        if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
                                                copy_len += TERM_RDMA_LEN;
                                                termhdr->hdrct |= RDMA_HDR_FLAG;
                                        }
                                }
                        }
                }
        }

        opcode = i40iw_iwarp_opcode(info, pkt);

        /* Map AE id -> (flush opcode, layer/etype, error code). */
        switch (info->ae_id) {
        case I40IW_AE_AMP_UNALLOCATED_STAG:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
                else
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
                break;
        case I40IW_AE_AMP_BOUNDS_VIOLATION:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                if (info->q2_data_written)
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
                else
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
                break;
        case I40IW_AE_AMP_BAD_PD:
                switch (opcode) {
                case I40IW_OP_TYPE_RDMA_WRITE:
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
                        break;
                case I40IW_OP_TYPE_SEND_INV:
                case I40IW_OP_TYPE_SEND_SOL_INV:
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
                        break;
                default:
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
                }
                break;
        case I40IW_AE_AMP_INVALID_STAG:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
                break;
        case I40IW_AE_AMP_BAD_QP:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
                break;
        case I40IW_AE_AMP_BAD_STAG_KEY:
        case I40IW_AE_AMP_BAD_STAG_INDEX:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                switch (opcode) {
                case I40IW_OP_TYPE_SEND_INV:
                case I40IW_OP_TYPE_SEND_SOL_INV:
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
                        break;
                default:
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
                }
                break;
        case I40IW_AE_AMP_RIGHTS_VIOLATION:
        case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
        case I40IW_AE_PRIV_OPERATION_DENIED:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
                break;
        case I40IW_AE_AMP_TO_WRAP:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
                break;
        case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
                                    (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
                break;
        case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
                break;
        case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
        case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
                                    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
                break;
        case I40IW_AE_LCE_QP_CATASTROPHIC:
        case I40IW_AE_DDP_NO_L_BIT:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
                                    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
                break;
        case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
        case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
                break;
        case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
                break;
        case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
                /* Tagged vs untagged determines which etype carries the error. */
                if (is_tagged)
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
                else
                        i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                            (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
                break;
        case I40IW_AE_DDP_UBE_INVALID_MO:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
                break;
        case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
                break;
        case I40IW_AE_DDP_UBE_INVALID_QN:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
                break;
        case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
                break;
        case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
                break;
        default:
                i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
                break;
        }

        /* Append the selected portion of the offending frame. */
        if (copy_len)
                memcpy(termhdr + 1, pkt, copy_len);

        return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
4446
4447 /**
4448  * i40iw_terminate_send_fin() - Send fin for terminate message
4449  * @qp: qp associated with received terminate AE
4450  */
4451 void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4452 {
4453         /* Send the fin only */
4454         i40iw_term_modify_qp(qp,
4455                              I40IW_QP_STATE_TERMINATE,
4456                              I40IWQP_TERM_SEND_FIN_ONLY,
4457                              0);
4458 }
4459
4460 /**
4461  * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
4462  * @qp: qp associated with received terminate AE
4463  * @info: the struct contiaing AE information
4464  */
4465 void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4466 {
4467         u8 termlen = 0;
4468
4469         if (qp->term_flags & I40IW_TERM_SENT)
4470                 return;         /* Sanity check */
4471
4472         /* Eventtype can change from bld_terminate_hdr */
4473         qp->eventtype = TERM_EVENT_QP_FATAL;
4474         termlen = i40iw_bld_terminate_hdr(qp, info);
4475         i40iw_terminate_start_timer(qp);
4476         qp->term_flags |= I40IW_TERM_SENT;
4477         i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4478                              I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4479 }
4480
4481 /**
4482  * i40iw_terminate_received - handle terminate received AE
4483  * @qp: qp associated with received terminate AE
4484  * @info: the struct contiaing AE information
4485  */
4486 void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4487 {
4488         u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4489         __be32 *mpa;
4490         u8 ddp_ctl;
4491         u8 rdma_ctl;
4492         u16 aeq_id = 0;
4493         struct i40iw_terminate_hdr *termhdr;
4494
4495         mpa = (__be32 *)i40iw_locate_mpa(pkt);
4496         if (info->q2_data_written) {
4497                 /* did not validate the frame - do it now */
4498                 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
4499                 rdma_ctl = ntohl(mpa[0]) & 0xff;
4500                 if ((ddp_ctl & 0xc0) != 0x40)
4501                         aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
4502                 else if ((ddp_ctl & 0x03) != 1)
4503                         aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
4504                 else if (ntohl(mpa[2]) != 2)
4505                         aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
4506                 else if (ntohl(mpa[3]) != 1)
4507                         aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
4508                 else if (ntohl(mpa[4]) != 0)
4509                         aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
4510                 else if ((rdma_ctl & 0xc0) != 0x40)
4511                         aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
4512
4513                 info->ae_id = aeq_id;
4514                 if (info->ae_id) {
4515                         /* Bad terminate recvd - send back a terminate */
4516                         i40iw_terminate_connection(qp, info);
4517                         return;
4518                 }
4519         }
4520
4521         qp->term_flags |= I40IW_TERM_RCVD;
4522         qp->eventtype = TERM_EVENT_QP_FATAL;
4523         termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
4524         if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
4525             termhdr->layer_etype == RDMAP_REMOTE_OP) {
4526                 i40iw_terminate_done(qp, 0);
4527         } else {
4528                 i40iw_terminate_start_timer(qp);
4529                 i40iw_terminate_send_fin(qp);
4530         }
4531 }
4532
4533 /**
4534  * i40iw_sc_vsi_init - Initialize virtual device
4535  * @vsi: pointer to the vsi structure
4536  * @info: parameters to initialize vsi
4537  **/
4538 void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
4539 {
4540         int i;
4541
4542         vsi->dev = info->dev;
4543         vsi->back_vsi = info->back_vsi;
4544         vsi->mss = info->params->mss;
4545         i40iw_fill_qos_list(info->params->qs_handle_list);
4546
4547         for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
4548                 vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
4549                 i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
4550                             vsi->qos[i].qs_handle);
4551                 spin_lock_init(&vsi->qos[i].lock);
4552                 INIT_LIST_HEAD(&vsi->qos[i].qplist);
4553         }
4554 }
4555
4556 /**
4557  * i40iw_hw_stats_init - Initiliaze HW stats table
4558  * @stats: pestat struct
4559  * @fcn_idx: PCI fn id
4560  * @is_pf: Is it a PF?
4561  *
4562  * Populate the HW stats table with register offset addr for each
4563  * stats. And start the perioidic stats timer.
4564  */
4565 void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
4566 {
4567         u32 stats_reg_offset;
4568         u32 stats_index;
4569         struct i40iw_dev_hw_stats_offsets *stats_table =
4570                 &stats->hw_stats_offsets;
4571         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4572
4573         if (is_pf) {
4574                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4575                                 I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4576                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4577                                 I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4578                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4579                                 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4580                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4581                                 I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4582                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4583                                 I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4584                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4585                                 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4586                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4587                                 I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4588                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4589                                 I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4590                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4591                                 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4592
4593                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4594                                 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4595                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4596                                 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4597                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4598                                 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4599                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4600                                 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4601                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4602                                 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4603                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4604                                 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4605                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4606                                 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4607                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4608                                 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4609                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4610                                 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4611                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4612                                 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4613                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4614                                 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4615                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4616                                 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4617                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4618                                 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4619                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4620                                 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4621                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4622                                 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4623                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4624                                 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4625                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4626                                 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4627                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4628                                 I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4629                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4630                                 I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4631                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4632                                 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4633                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4634                                 I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4635                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4636                                 I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4637                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4638                                 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4639                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4640                                 I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4641                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4642                                 I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4643                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4644                                 I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4645         } else {
4646                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4647                                 I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4648                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4649                                 I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4650                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4651                                 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4652                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4653                                 I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4654                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4655                                 I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4656                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4657                                 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4658                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4659                                 I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4660                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4661                                 I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4662                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4663                                 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4664
4665                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4666                                 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4667                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4668                                 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4669                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4670                                 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4671                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4672                                 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4673                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4674                                 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4675                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4676                                 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4677                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4678                                 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4679                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4680                                 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4681                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4682                                 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4683                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4684                                 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4685                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4686                                 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4687                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4688                                 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4689                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4690                                 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4691                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4692                                 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4693                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4694                                 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4695                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4696                                 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4697                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4698                                 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4699                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4700                                 I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4701                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4702                                 I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4703                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4704                                 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4705                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4706                                 I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4707                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4708                                 I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4709                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4710                                 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4711                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4712                                 I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4713                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4714                                 I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4715                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4716                                 I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4717         }
4718
4719         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4720              stats_index++) {
4721                 stats_reg_offset = stats_table->stats_offset_64[stats_index];
4722                 last_rd_stats->stats_value_64[stats_index] =
4723                         readq(stats->hw->hw_addr + stats_reg_offset);
4724         }
4725
4726         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4727              stats_index++) {
4728                 stats_reg_offset = stats_table->stats_offset_32[stats_index];
4729                 last_rd_stats->stats_value_32[stats_index] =
4730                         i40iw_rd32(stats->hw, stats_reg_offset);
4731         }
4732 }
4733
4734 /**
4735  * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
4736  * @stat: pestat struct
4737  * @index: index in HW stats table which contains offset reg-addr
4738  * @value: hw stats value
4739  */
4740 void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
4741                             enum i40iw_hw_stats_index_32b index,
4742                             u64 *value)
4743 {
4744         struct i40iw_dev_hw_stats_offsets *stats_table =
4745                 &stats->hw_stats_offsets;
4746         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4747         struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4748         u64 new_stats_value = 0;
4749         u32 stats_reg_offset = stats_table->stats_offset_32[index];
4750
4751         new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
4752         /*roll-over case */
4753         if (new_stats_value < last_rd_stats->stats_value_32[index])
4754                 hw_stats->stats_value_32[index] += new_stats_value;
4755         else
4756                 hw_stats->stats_value_32[index] +=
4757                         new_stats_value - last_rd_stats->stats_value_32[index];
4758         last_rd_stats->stats_value_32[index] = new_stats_value;
4759         *value = hw_stats->stats_value_32[index];
4760 }
4761
4762 /**
4763  * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
4764  * @stats: pestat struct
4765  * @index: index in HW stats table which contains offset reg-addr
4766  * @value: hw stats value
4767  */
4768 void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
4769                             enum i40iw_hw_stats_index_64b index,
4770                             u64 *value)
4771 {
4772         struct i40iw_dev_hw_stats_offsets *stats_table =
4773                 &stats->hw_stats_offsets;
4774         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4775         struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4776         u64 new_stats_value = 0;
4777         u32 stats_reg_offset = stats_table->stats_offset_64[index];
4778
4779         new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
4780         /*roll-over case */
4781         if (new_stats_value < last_rd_stats->stats_value_64[index])
4782                 hw_stats->stats_value_64[index] += new_stats_value;
4783         else
4784                 hw_stats->stats_value_64[index] +=
4785                         new_stats_value - last_rd_stats->stats_value_64[index];
4786         last_rd_stats->stats_value_64[index] = new_stats_value;
4787         *value = hw_stats->stats_value_64[index];
4788 }
4789
4790 /**
4791  * i40iw_hw_stats_read_all - read all HW stat counters
4792  * @stats: pestat struct
4793  * @stats_values: hw stats structure
4794  *
4795  * Read all the HW stat counters and populates hw_stats structure
4796  * of passed-in vsi's pestat as well as copy created in stat_values.
4797  */
4798 void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
4799                              struct i40iw_dev_hw_stats *stats_values)
4800 {
4801         u32 stats_index;
4802         unsigned long flags;
4803
4804         spin_lock_irqsave(&stats->lock, flags);
4805
4806         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4807              stats_index++)
4808                 i40iw_hw_stats_read_32(stats, stats_index,
4809                                        &stats_values->stats_value_32[stats_index]);
4810         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4811              stats_index++)
4812                 i40iw_hw_stats_read_64(stats, stats_index,
4813                                        &stats_values->stats_value_64[stats_index]);
4814         spin_unlock_irqrestore(&stats->lock, flags);
4815 }
4816
4817 /**
4818  * i40iw_hw_stats_refresh_all - Update all HW stats structs
4819  * @stats: pestat struct
4820  *
4821  * Read all the HW stats counters to refresh values in hw_stats structure
4822  * of passed-in dev's pestat
4823  */
4824 void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
4825 {
4826         u64 stats_value;
4827         u32 stats_index;
4828         unsigned long flags;
4829
4830         spin_lock_irqsave(&stats->lock, flags);
4831
4832         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4833              stats_index++)
4834                 i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
4835         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4836              stats_index++)
4837                 i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
4838         spin_unlock_irqrestore(&stats->lock, flags);
4839 }
4840
4841 /**
4842  * i40iw_get_fcn_id - Return the function id
4843  * @dev: pointer to the device
4844  */
4845 static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
4846 {
4847         u8 fcn_id = I40IW_INVALID_FCN_ID;
4848         u8 i;
4849
4850         for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
4851                 if (!dev->fcn_id_array[i]) {
4852                         fcn_id = i;
4853                         dev->fcn_id_array[i] = true;
4854                         break;
4855                 }
4856         return fcn_id;
4857 }
4858
4859 /**
4860  * i40iw_vsi_stats_init - Initialize the vsi statistics
4861  * @vsi: pointer to the vsi structure
4862  * @info: The info structure used for initialization
4863  */
4864 enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
4865 {
4866         u8 fcn_id = info->fcn_id;
4867
4868         if (info->alloc_fcn_id)
4869                 fcn_id = i40iw_get_fcn_id(vsi->dev);
4870
4871         if (fcn_id == I40IW_INVALID_FCN_ID)
4872                 return I40IW_ERR_NOT_READY;
4873
4874         vsi->pestat = info->pestat;
4875         vsi->pestat->hw = vsi->dev->hw;
4876
4877         if (info->stats_initialize) {
4878                 i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
4879                 spin_lock_init(&vsi->pestat->lock);
4880                 i40iw_hw_stats_start_timer(vsi);
4881         }
4882         vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
4883         vsi->fcn_id = fcn_id;
4884         return I40IW_SUCCESS;
4885 }
4886
4887 /**
4888  * i40iw_vsi_stats_free - Free the vsi stats
4889  * @vsi: pointer to the vsi structure
4890  */
4891 void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
4892 {
4893         u8 fcn_id = vsi->fcn_id;
4894
4895         if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
4896                 vsi->dev->fcn_id_array[fcn_id] = false;
4897         i40iw_hw_stats_stop_timer(vsi);
4898 }
4899
/* control QP (CQP) ops */
static struct i40iw_cqp_ops iw_cqp_ops = {
	.cqp_init = i40iw_sc_cqp_init,
	.cqp_create = i40iw_sc_cqp_create,
	.cqp_post_sq = i40iw_sc_cqp_post_sq,
	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
	.cqp_destroy = i40iw_sc_cqp_destroy,
	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};
4908
/* control CQ (CCQ) ops */
static struct i40iw_ccq_ops iw_ccq_ops = {
	.ccq_init = i40iw_sc_ccq_init,
	.ccq_create = i40iw_sc_ccq_create,
	.ccq_destroy = i40iw_sc_ccq_destroy,
	.ccq_create_done = i40iw_sc_ccq_create_done,
	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
	.ccq_arm = i40iw_sc_ccq_arm
};
4917
/* completion event queue (CEQ) ops */
static struct i40iw_ceq_ops iw_ceq_ops = {
	.ceq_init = i40iw_sc_ceq_init,
	.ceq_create = i40iw_sc_ceq_create,
	.cceq_create_done = i40iw_sc_cceq_create_done,
	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
	.cceq_create = i40iw_sc_cceq_create,
	.ceq_destroy = i40iw_sc_ceq_destroy,
	.process_ceq = i40iw_sc_process_ceq
};
4927
/* asynchronous event queue (AEQ) ops */
static struct i40iw_aeq_ops iw_aeq_ops = {
	.aeq_init = i40iw_sc_aeq_init,
	.aeq_create = i40iw_sc_aeq_create,
	.aeq_destroy = i40iw_sc_aeq_destroy,
	.get_next_aeqe = i40iw_sc_get_next_aeqe,
	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
	.aeq_create_done = i40iw_sc_aeq_create_done,
	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
};
4937
/* iwarp protection domain (pd) ops */
static struct i40iw_pd_ops iw_pd_ops = {
	.pd_init = i40iw_sc_pd_init,
};
4942
/* privileged QP ops */
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};
4957
/* privileged CQ ops */
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	.cq_init = i40iw_sc_cq_init,
	.cq_create = i40iw_sc_cq_create,
	.cq_destroy = i40iw_sc_cq_destroy,
	.cq_modify = i40iw_sc_cq_modify,
};
4964
/* memory region / memory window ops */
static struct i40iw_mr_ops iw_mr_ops = {
	.alloc_stag = i40iw_sc_alloc_stag,
	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
	.mr_reg_shared = i40iw_sc_mr_reg_shared,
	.dealloc_stag = i40iw_sc_dealloc_stag,
	.query_stag = i40iw_sc_query_stag,
	.mw_alloc = i40iw_sc_mw_alloc
};
4973
/* miscellaneous CQP commands (HMC, ARP cache, APBVT, qhash, MAC/IP table) */
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	.manage_push_page = i40iw_sc_manage_push_page,
	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
	.commit_fpm_values = i40iw_sc_commit_fpm_values,
	.query_fpm_values = i40iw_sc_query_fpm_values,
	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
	.cqp_nop = i40iw_sc_cqp_nop,
	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
	.update_suspend_qp = i40iw_sc_suspend_qp,
	.update_resume_qp = i40iw_sc_resume_qp
};
4996
/* host memory cache (HMC) ops */
static struct i40iw_hmc_ops iw_hmc_ops = {
	.init_iw_hmc = i40iw_sc_init_iw_hmc,
	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
	.create_hmc_object = i40iw_sc_create_hmc_obj,
	.del_hmc_object = i40iw_sc_del_hmc_obj
};
5005
5006 /**
5007  * i40iw_device_init - Initialize IWARP device
5008  * @dev: IWARP device pointer
5009  * @info: IWARP init info
5010  */
5011 enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
5012                                          struct i40iw_device_init_info *info)
5013 {
5014         u32 val;
5015         u32 vchnl_ver = 0;
5016         u16 hmc_fcn = 0;
5017         enum i40iw_status_code ret_code = 0;
5018         u8 db_size;
5019
5020         spin_lock_init(&dev->cqp_lock);
5021         INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
5022
5023         i40iw_device_init_uk(&dev->dev_uk);
5024
5025         dev->debug_mask = info->debug_mask;
5026
5027         dev->hmc_fn_id = info->hmc_fn_id;
5028         dev->exception_lan_queue = info->exception_lan_queue;
5029         dev->is_pf = info->is_pf;
5030
5031         dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5032         dev->fpm_query_buf = info->fpm_query_buf;
5033
5034         dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5035         dev->fpm_commit_buf = info->fpm_commit_buf;
5036
5037         dev->hw = info->hw;
5038         dev->hw->hw_addr = info->bar0;
5039
5040         if (dev->is_pf) {
5041                 val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
5042                 dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
5043
5044                 val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
5045                 db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
5046                 if ((db_size != I40IW_PE_DB_SIZE_4M) &&
5047                     (db_size != I40IW_PE_DB_SIZE_8M)) {
5048                         i40iw_debug(dev, I40IW_DEBUG_DEV,
5049                                     "%s: PE doorbell is not enabled in CSR val 0x%x\n",
5050                                     __func__, val);
5051                         ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
5052                         return ret_code;
5053                 }
5054                 dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
5055                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
5056         } else {
5057                 dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
5058         }
5059
5060         dev->cqp_ops = &iw_cqp_ops;
5061         dev->ccq_ops = &iw_ccq_ops;
5062         dev->ceq_ops = &iw_ceq_ops;
5063         dev->aeq_ops = &iw_aeq_ops;
5064         dev->cqp_misc_ops = &iw_cqp_misc_ops;
5065         dev->iw_pd_ops = &iw_pd_ops;
5066         dev->iw_priv_qp_ops = &iw_priv_qp_ops;
5067         dev->iw_priv_cq_ops = &iw_priv_cq_ops;
5068         dev->mr_ops = &iw_mr_ops;
5069         dev->hmc_ops = &iw_hmc_ops;
5070         dev->vchnl_if.vchnl_send = info->vchnl_send;
5071         if (dev->vchnl_if.vchnl_send)
5072                 dev->vchnl_up = true;
5073         else
5074                 dev->vchnl_up = false;
5075         if (!dev->is_pf) {
5076                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
5077                 ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
5078                 if (!ret_code) {
5079                         i40iw_debug(dev, I40IW_DEBUG_DEV,
5080                                     "%s: Get Channel version rc = 0x%0x, version is %u\n",
5081                                 __func__, ret_code, vchnl_ver);
5082                         ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
5083                         if (!ret_code) {
5084                                 i40iw_debug(dev, I40IW_DEBUG_DEV,
5085                                             "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5086                                             __func__, ret_code, hmc_fcn);
5087                                 dev->hmc_fn_id = (u8)hmc_fcn;
5088                         }
5089                 }
5090         }
5091         dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
5092
5093         return ret_code;
5094 }