Linux-libre 5.3.12-gnu (librecmc/linux-libre.git)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

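/* Fill one hardware data segment; all fields are stored little-endian */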
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                            struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

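/* Build the fast-register (FRMR) part of an RC send WQE from a reg_wr */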
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         struct hns_roce_wqe_frmr_seg *fseg,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);

        /* use ib_access_flags */
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
                     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
                     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_RR_S,
                     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_RW_S,
                     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_LW_S,
                     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

        /* The PBL base address reuses the msg_len and inv_key fields */
        rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
        rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        fseg->pbl_size = cpu_to_le32(mr->pbl_size);
        roce_set_field(fseg->mode_buf_pg_sz,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
                       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_bit(fseg->mode_buf_pg_sz,
                     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

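/*
 * Fill the atomic segment: compare-and-swap carries both the swap and
 * compare operands, fetch-and-add only carries the add operand.
 */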
static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
                           const struct ib_atomic_wr *wr)
{
        if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
                aseg->cmp_data  = cpu_to_le64(wr->compare_add);
        } else {
                aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
                aseg->cmp_data  = 0;
        }
}

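/*
 * Write the SGEs that do not fit in the WQE itself into the extended SGE
 * space, splitting the copy at a page boundary when necessary.
 */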
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                           unsigned int *sge_ind)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct ib_sge *sg;
        int num_in_wqe = 0;
        int extend_sge_num;
        int fi_sge_num;
        int se_sge_num;
        int shift;
        int i;

        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
                num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
        extend_sge_num = wr->num_sge - num_in_wqe;
        sg = wr->sg_list + num_in_wqe;
        shift = qp->hr_buf.page_shift;

        /*
         * Check whether all of wr->num_sge SGEs fit in the same page. If not,
         * calculate how many SGEs go in the first page and how many in the
         * second page.
         */
        dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
        fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
                      (uintptr_t)dseg) /
                      sizeof(struct hns_roce_v2_wqe_data_seg);
        if (extend_sge_num > fi_sge_num) {
                se_sge_num = extend_sge_num - fi_sge_num;
                for (i = 0; i < fi_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
                dseg = get_send_extend_sge(qp,
                                           (*sge_ind) & (qp->sge.sge_cnt - 1));
                for (i = 0; i < se_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + fi_sge_num + i);
                        (*sge_ind)++;
                }
        } else {
                for (i = 0; i < extend_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
        }
}

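/* Fill the data portion of an RC/UC send WQE, either inline or as SGEs */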
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             void *wqe, unsigned int *sge_ind,
                             const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_v2_wqe_data_seg *dseg = wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int i;

        if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
                if (le32_to_cpu(rc_sq_wqe->msg_len) >
                    hr_dev->caps.max_sq_inline) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
                                hr_dev->caps.max_sq_inline,
                                le32_to_cpu(rc_sq_wqe->msg_len));
                        return -EINVAL;
                }

                if (wr->opcode == IB_WR_RDMA_READ) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev, "inline data is not supported!\n");
                        return -EINVAL;
                }

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(wqe, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        wqe += wr->sg_list[i].length;
                }

                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
                             1);
        } else {
                if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
                        for (i = 0; i < wr->num_sge; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }
                } else {
                        roce_set_field(rc_sq_wqe->byte_20,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     (*sge_ind) & (qp->sge.sge_cnt - 1));

                        for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }

                        set_extend_sge(qp, wr, sge_ind);
                }

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
        }

        return 0;
}

static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                 const struct ib_qp_attr *attr,
                                 int attr_mask, enum ib_qp_state cur_state,
                                 enum ib_qp_state new_state);

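/*
 * Post a list of send work requests to the SQ. GSI (UD) and RC WQEs are
 * built separately; the SQ doorbell is rung once for the whole chain.
 */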
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct hns_roce_wqe_frmr_seg *fseg;
        struct device *dev = hr_dev->dev;
        struct hns_roce_v2_db sq_db;
        struct ib_qp_attr attr;
        unsigned int sge_ind = 0;
        unsigned int owner_bit;
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
        bool loopback;
        int attr_mask;
        u32 tmp_len;
        int ret = 0;
        u32 hr_op;
        u8 *smac;
        int nreq;
        int i;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                dev_err(dev, "unsupported QP type (0x%x)!\n", ibqp->qp_type);
                *bad_wr = wr;
                return -EOPNOTSUPP;
        }

        if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
                     qp->state == IB_QPS_RTR)) {
                dev_err(dev, "failed to post WQE, QP state %d!\n", qp->state);
                *bad_wr = wr;
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);
        ind = qp->sq_next_wqe;
        sge_ind = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;

                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
                tmp_len = 0;

                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_GSI) {
                        ud_sq_wqe = wqe;
                        memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
                                       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
                                       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
                                       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
                                       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
                                       ah->av.mac[4]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
                                       ah->av.mac[5]);

                        /* MAC loopback */
                        smac = (u8 *)hr_dev->dev_addr[qp->port];
                        loopback = ether_addr_equal_unaligned(ah->av.mac,
                                                              smac) ? 1 : 0;

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

                        roce_set_field(ud_sq_wqe->byte_4,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
                                       HNS_ROCE_V2_WQE_OP_SEND);

                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        ud_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ud_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        default:
                                ud_sq_wqe->immtdata = 0;
                                break;
                        }

                        /* Set sig attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                   V2_UD_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        /* Set se attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                  V2_UD_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(ud_sq_wqe->byte_4,
                                     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_PD_M,
                                       V2_UD_SEND_WQE_BYTE_16_PD_S,
                                       to_hr_pd(ibqp->pd)->pdn);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
                                       wr->num_sge);

                        roce_set_field(ud_sq_wqe->byte_20,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     sge_ind & (qp->sge.sge_cnt - 1));

                        roce_set_field(ud_sq_wqe->byte_24,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
                        ud_sq_wqe->qkey =
                             cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                             qp->qkey : ud_wr(wr)->remote_qkey);
                        roce_set_field(ud_sq_wqe->byte_32,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
                                       ud_wr(wr)->remote_qpn);

                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
                                       le16_to_cpu(ah->av.vlan));
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
                                       ah->av.hop_limit);
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
                                       ah->av.sl_tclass_flowlabel >>
                                       HNS_ROCE_TCLASS_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
                                       ah->av.sl_tclass_flowlabel &
                                       HNS_ROCE_FLOW_LABEL_MASK);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_SL_M,
                                       V2_UD_SEND_WQE_BYTE_40_SL_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
                                      HNS_ROCE_SL_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
                                       qp->port);

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                                     ah->av.vlan_en ? 1 : 0);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
                                       hns_get_gid_index(hr_dev, qp->phy_port,
                                                         ah->av.gid_index));

                        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
                               GID_LEN_V2);

                        set_extend_sge(qp, wr, &sge_ind);
                        ind++;
                } else if (ibqp->qp_type == IB_QPT_RC) {
                        rc_sq_wqe = wqe;
                        memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        rc_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                rc_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        case IB_WR_SEND_WITH_INV:
                                rc_sq_wqe->inv_key =
                                        cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        default:
                                rc_sq_wqe->immtdata = 0;
                                break;
                        }

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                  V2_RC_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                   V2_RC_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_SEND:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
                                break;
                        case IB_WR_SEND_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
                                break;
                        case IB_WR_LOCAL_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
                                roce_set_bit(rc_sq_wqe->byte_4,
                                               V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
                                rc_sq_wqe->inv_key =
                                            cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        case IB_WR_REG_MR:
                                hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
                                fseg = wqe;
                                set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(atomic_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(atomic_wr(wr)->remote_addr);
                                break;
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(atomic_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(atomic_wr(wr)->remote_addr);
                                break;
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                                hr_op =
                                       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
                                break;
                        case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
                                hr_op =
                                      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
                                break;
                        default:
                                hr_op = HNS_ROCE_V2_WQE_OP_MASK;
                                break;
                        }

                        roce_set_field(rc_sq_wqe->byte_4,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

                        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                                struct hns_roce_v2_wqe_data_seg *dseg;

                                dseg = wqe;
                                set_data_seg_v2(dseg, wr->sg_list);
                                wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
                                set_atomic_seg(wqe, atomic_wr(wr));
                                roce_set_field(rc_sq_wqe->byte_16,
                                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
                                               wr->num_sge);
                        } else if (wr->opcode != IB_WR_REG_MR) {
                                ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
                                                        wqe, &sge_ind, bad_wr);
                                if (ret)
                                        goto out;
                        }

                        ind++;
                } else {
                        dev_err(dev, "illegal QP type (0x%x)\n", ibqp->qp_type);
                        spin_unlock_irqrestore(&qp->sq.lock, flags);
                        *bad_wr = wr;
                        return -EOPNOTSUPP;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                /* Memory barrier */
                wmb();

                sq_db.byte_4 = 0;
                sq_db.parameter = 0;

                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
                               V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
                               V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
                               V2_DB_PARAMETER_IDX_S,
                               qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);

                hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);

                qp->sq_next_wqe = ind;
                qp->next_sge = sge_ind;

                if (qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
                                                    qp->state, IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&qp->sq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

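/*
 * Post a list of receive work requests to the RQ and update the RQ
 * record doorbell.
 */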
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_rinl_sge *sge_list;
        struct device *dev = hr_dev->dev;
        struct ib_qp_attr attr;
        unsigned long flags;
        void *wqe = NULL;
        int attr_mask;
        int ret = 0;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);
        ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

        if (hr_qp->state == IB_QPS_RESET) {
                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
                        hr_qp->ibqp.recv_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        dev_err(dev, "rq: num_sge=%d > qp->rq.max_gs=%d\n",
                                wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(hr_qp, ind);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
                for (i = 0; i < wr->num_sge; i++) {
                        if (!wr->sg_list[i].length)
                                continue;
                        set_data_seg_v2(dseg, wr->sg_list + i);
                        dseg++;
                }

                if (i < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
                }

                /* The RQ supports inline data */
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
                        sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
                        hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
                                                               (u32)wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                sge_list[i].addr =
                                               (void *)(u64)wr->sg_list[i].addr;
                                sge_list[i].len = wr->sg_list[i].length;
                        }
                }

                hr_qp->rq.wrid[ind] = wr->wr_id;

                ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Memory barrier */
                wmb();

                *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

                if (hr_qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
                                                    attr_mask, hr_qp->state,
                                                    IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
{
        /* When a hardware reset has completed at least once, we should stop
         * sending mailbox, CMQ and doorbell operations to the hardware. If we
         * are currently in the .init_instance() function, or at the
         * HNAE3_INIT_CLIENT stage of the soft reset process, we should exit
         * with an error so that the HNAE3_INIT_CLIENT related process can
         * roll back operations such as notifying the hardware to free
         * resources, and so that the NIC driver is notified to reschedule
         * the soft reset process.
         */
        hr_dev->is_reset = true;
        hr_dev->dis_db = true;

        if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a hardware reset is detected, we should stop sending mailbox,
         * CMQ and doorbell operations to the hardware. If we are currently in
         * the .init_instance() function, or at the HNAE3_INIT_CLIENT stage of
         * the soft reset process, we should exit with an error so that the
         * HNAE3_INIT_CLIENT related process can roll back operations such as
         * notifying the hardware to free resources, and so that the NIC
         * driver is notified to reschedule the soft reset process.
         */
        hr_dev->dis_db = true;
        if (!ops->get_hw_reset_stat(handle))
                hr_dev->is_reset = true;

        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a software reset is detected in the .init_instance() function,
         * we should stop sending mailbox, CMQ and doorbell operations to the
         * hardware and exit with an error.
         */
        hr_dev->dis_db = true;
        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
                hr_dev->is_reset = true;

        return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage;   /* the current instance stage */
        unsigned long reset_stage;      /* the current reset stage */
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        if (hr_dev->is_reset)
                return CMD_RST_PRC_SUCCESS;

        /* Get reset information from the NIC driver or from the RoCE driver
         * itself; the meanings of the variables taken from the NIC driver are
         * as follows:
         * reset_cnt -- the number of completed hardware resets.
         * hw_resetting -- whether the hardware device is resetting now.
         * sw_resetting -- whether the NIC's software reset is running now.
         */
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        hw_resetting = ops->get_hw_reset_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);

        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);
        else if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);
        else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);

        return 0;
}

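/* Free slots in the CMQ ring; one slot stays empty to tell full from empty */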
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        int ret;

        /* Setup the queue entries for command queue */
        priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
        priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

        /* Setup the lock for command queue */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Setup Tx write back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

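/* The CSQ is done when the hardware head pointer catches up with next_to_use */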
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

        return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

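/*
 * Copy @num descriptors into the CSQ, ring the tail register and, for
 * synchronous (NO_INTR) commands, poll the head register until hardware
 * has consumed them or the timeout expires; results are copied back
 * into @desc.
 */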
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret = 0;
        int ntc;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record the location of the descriptors in the CSQ for this
         * submission, which will be used when hardware writes back.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is synchronous, wait for the firmware to write back;
         * if multiple descriptors were sent, use the first one to check.
         */
        if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                while (handle < num) {
                        /* get the result of hardware write back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = desc[handle].retval;
                        if (desc_ret == CMD_EXEC_SUCCESS)
                                ret = 0;
                        else
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

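/* Wrapper around __hns_roce_cmq_send() that accounts for resets in progress */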
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        int retval;
        int ret;

        ret = hns_roce_v2_rst_process_cmd(hr_dev);
        if (ret == CMD_RST_PRC_SUCCESS)
                return 0;
        if (ret == CMD_RST_PRC_EBUSY)
                return -EBUSY;

        ret = __hns_roce_cmq_send(hr_dev, desc, num);
        if (ret) {
                retval = hns_roce_v2_rst_process_cmd(hr_dev);
                if (retval == CMD_RST_PRC_SUCCESS)
                        return 0;
                else if (retval == CMD_RST_PRC_EBUSY)
                        return -EBUSY;
        }

        return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

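/*
 * Ask the firmware to clear this function's resources, then poll the
 * RST_FUN_DONE flag until it is set or the timeout elapses.
 */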
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_func_clear *resp;
        struct hns_roce_cmq_desc desc;
        unsigned long end;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
        resp = (struct hns_roce_func_clear *)desc.data;

        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret) {
                dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
                         ret);
                return;
        }

        msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
        end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
        while (end) {
                msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
                end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
                                              true);

                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
                if (ret)
                        continue;

                if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
                        hr_dev->is_reset = true;
                        return;
                }
        }

        dev_err(hr_dev->dev, "Func clear failed.\n");
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_fw_info *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_fw_info *)desc.data;
        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

        return 0;
}

1189 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1190 {
1191         struct hns_roce_cfg_global_param *req;
1192         struct hns_roce_cmq_desc desc;
1193
1194         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1195                                       false);
1196
1197         req = (struct hns_roce_cfg_global_param *)desc.data;
1198         memset(req, 0, sizeof(*req));
1199         roce_set_field(req->time_cfg_udp_port,
1200                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1201                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1202         roce_set_field(req->time_cfg_udp_port,
1203                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1204                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
1205
1206         return hns_roce_cmq_send(hr_dev, &desc, 1);
1207 }
1208
1209 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1210 {
1211         struct hns_roce_cmq_desc desc[2];
1212         struct hns_roce_pf_res_a *req_a;
1213         struct hns_roce_pf_res_b *req_b;
1214         int ret;
1215         int i;
1216
1217         for (i = 0; i < 2; i++) {
1218                 hns_roce_cmq_setup_basic_desc(&desc[i],
1219                                               HNS_ROCE_OPC_QUERY_PF_RES, true);
1220
1221                 if (i == 0)
1222                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1223                 else
1224                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1225         }
1226
1227         ret = hns_roce_cmq_send(hr_dev, desc, 2);
1228         if (ret)
1229                 return ret;
1230
1231         req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1232         req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1233
1234         hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1235                                                  PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1236                                                  PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1237         hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1238                                                 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1239                                                 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1240         hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1241                                                  PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1242                                                  PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1243         hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1244                                                  PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1245                                                  PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1246
1247         hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1248                                              PF_RES_DATA_3_PF_SL_NUM_M,
1249                                              PF_RES_DATA_3_PF_SL_NUM_S);
1250         hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1251                                              PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1252                                              PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1253
1254         return 0;
1255 }
1256
1257 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1258 {
1259         struct hns_roce_pf_timer_res_a *req_a;
1260         struct hns_roce_cmq_desc desc[2];
1261         int ret, i;
1262
1263         for (i = 0; i < 2; i++) {
1264                 hns_roce_cmq_setup_basic_desc(&desc[i],
1265                                               HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1266                                               true);
1267
1268                 if (i == 0)
1269                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1270                 else
1271                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1272         }
1273
1274         ret = hns_roce_cmq_send(hr_dev, desc, 2);
1275         if (ret)
1276                 return ret;
1277
1278         req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
1279
1280         hr_dev->caps.qpc_timer_bt_num =
1281                                 roce_get_field(req_a->qpc_timer_bt_idx_num,
1282                                         PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1283                                         PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1284         hr_dev->caps.cqc_timer_bt_num =
1285                                 roce_get_field(req_a->cqc_timer_bt_idx_num,
1286                                         PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1287                                         PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1288
1289         return 0;
1290 }
1291
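/*
 * Read-modify-write of the per-VF switch parameters: query the current
 * settings, then clear the WR (read) flag so the same descriptor can be
 * re-sent as a write with the loopback and destination-override bits
 * enabled.
 */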
1292 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1293                                         int vf_id)
1294 {
1295         struct hns_roce_cmq_desc desc;
1296         struct hns_roce_vf_switch *swt;
1297         int ret;
1298
1299         swt = (struct hns_roce_vf_switch *)desc.data;
1300         hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1301         swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1302         roce_set_field(swt->fun_id,
1303                         VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1304                         VF_SWITCH_DATA_FUN_ID_VF_ID_S,
1305                         vf_id);
1306         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1307         if (ret)
1308                 return ret;
1309         desc.flag =
1310                 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1311         desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1312         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1313         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
1314         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1315
1316         return hns_roce_cmq_send(hr_dev, &desc, 1);
1317 }
1318
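/*
 * Carve out HW resources for the function: QPC/SRQC/CQC/MPT base
 * address table entries and EQ contexts in descriptor A, SMAC/SGID
 * table entries, service levels and SCCC entries in descriptor B.
 * All base indexes are set to 0.
 */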
1319 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1320 {
1321         struct hns_roce_cmq_desc desc[2];
1322         struct hns_roce_vf_res_a *req_a;
1323         struct hns_roce_vf_res_b *req_b;
1324         int i;
1325
1326         req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1327         req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1328         memset(req_a, 0, sizeof(*req_a));
1329         memset(req_b, 0, sizeof(*req_b));
1330         for (i = 0; i < 2; i++) {
1331                 hns_roce_cmq_setup_basic_desc(&desc[i],
1332                                               HNS_ROCE_OPC_ALLOC_VF_RES, false);
1333
1334                 if (i == 0)
1335                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1336                 else
1337                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1338
1339                 if (i == 0) {
1340                         roce_set_field(req_a->vf_qpc_bt_idx_num,
1341                                        VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1342                                        VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1343                         roce_set_field(req_a->vf_qpc_bt_idx_num,
1344                                        VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1345                                        VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
1346                                        HNS_ROCE_VF_QPC_BT_NUM);
1347
1348                         roce_set_field(req_a->vf_srqc_bt_idx_num,
1349                                        VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1350                                        VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1351                         roce_set_field(req_a->vf_srqc_bt_idx_num,
1352                                        VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1353                                        VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1354                                        HNS_ROCE_VF_SRQC_BT_NUM);
1355
1356                         roce_set_field(req_a->vf_cqc_bt_idx_num,
1357                                        VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1358                                        VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1359                         roce_set_field(req_a->vf_cqc_bt_idx_num,
1360                                        VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1361                                        VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1362                                        HNS_ROCE_VF_CQC_BT_NUM);
1363
1364                         roce_set_field(req_a->vf_mpt_bt_idx_num,
1365                                        VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1366                                        VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1367                         roce_set_field(req_a->vf_mpt_bt_idx_num,
1368                                        VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1369                                        VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1370                                        HNS_ROCE_VF_MPT_BT_NUM);
1371
1372                         roce_set_field(req_a->vf_eqc_bt_idx_num,
1373                                        VF_RES_A_DATA_5_VF_EQC_IDX_M,
1374                                        VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1375                         roce_set_field(req_a->vf_eqc_bt_idx_num,
1376                                        VF_RES_A_DATA_5_VF_EQC_NUM_M,
1377                                        VF_RES_A_DATA_5_VF_EQC_NUM_S,
1378                                        HNS_ROCE_VF_EQC_NUM);
1379                 } else {
1380                         roce_set_field(req_b->vf_smac_idx_num,
1381                                        VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1382                                        VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1383                         roce_set_field(req_b->vf_smac_idx_num,
1384                                        VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1385                                        VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1386                                        HNS_ROCE_VF_SMAC_NUM);
1387
1388                         roce_set_field(req_b->vf_sgid_idx_num,
1389                                        VF_RES_B_DATA_2_VF_SGID_IDX_M,
1390                                        VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1391                         roce_set_field(req_b->vf_sgid_idx_num,
1392                                        VF_RES_B_DATA_2_VF_SGID_NUM_M,
1393                                        VF_RES_B_DATA_2_VF_SGID_NUM_S,
1394                                        HNS_ROCE_VF_SGID_NUM);
1395
1396                         roce_set_field(req_b->vf_qid_idx_sl_num,
1397                                        VF_RES_B_DATA_3_VF_QID_IDX_M,
1398                                        VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1399                         roce_set_field(req_b->vf_qid_idx_sl_num,
1400                                        VF_RES_B_DATA_3_VF_SL_NUM_M,
1401                                        VF_RES_B_DATA_3_VF_SL_NUM_S,
1402                                        HNS_ROCE_VF_SL_NUM);
1403
1404                         roce_set_field(req_b->vf_sccc_idx_num,
1405                                        VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1406                                        VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1407                         roce_set_field(req_b->vf_sccc_idx_num,
1408                                        VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1409                                        VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1410                                        HNS_ROCE_VF_SCCC_BT_NUM);
1411                 }
1412         }
1413
1414         return hns_roce_cmq_send(hr_dev, desc, 2);
1415 }
1416
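/*
 * Describe the base address table layout to the HW for each context
 * type: BA page size and buffer page size (both encoded as a shift,
 * with PG_SHIFT_OFFSET apparently rebasing it from the kernel's
 * PAGE_SHIFT to the HW's 4K unit) plus the hop number, where the
 * special HNS_ROCE_HOP_NUM_0 value is written as 0.
 */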
1417 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1418 {
1419         u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1420         u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1421         u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1422         u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1423         u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1424         struct hns_roce_cfg_bt_attr *req;
1425         struct hns_roce_cmq_desc desc;
1426
1427         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1428         req = (struct hns_roce_cfg_bt_attr *)desc.data;
1429         memset(req, 0, sizeof(*req));
1430
1431         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1432                        CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1433                        hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1434         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1435                        CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1436                        hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1437         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1438                        CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1439                        qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1440
1441         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1442                        CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1443                        hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1444         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1445                        CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1446                        hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1447         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1448                        CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1449                        srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1450
1451         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1452                        CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1453                        hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1454         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1455                        CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1456                        hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1457         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1458                        CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1459                        cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1460
1461         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1462                        CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1463                        hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1464         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1465                        CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1466                        hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1467         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1468                        CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1469                        mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1470
1471         roce_set_field(req->vf_sccc_cfg,
1472                        CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1473                        CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1474                        hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1475         roce_set_field(req->vf_sccc_cfg,
1476                        CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1477                        CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1478                        hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1479         roce_set_field(req->vf_sccc_cfg,
1480                        CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1481                        CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1482                        sccc_hop_num ==
1483                               HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1484
1485         return hns_roce_cmq_send(hr_dev, &desc, 1);
1486 }
1487
1488 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1489 {
1490         struct hns_roce_caps *caps = &hr_dev->caps;
1491         int ret;
1492
1493         ret = hns_roce_cmq_query_hw_info(hr_dev);
1494         if (ret) {
1495                 dev_err(hr_dev->dev,
1496                         "Query hardware version failed, ret = %d.\n", ret);
1497                 return ret;
1498         }
1499
1500         ret = hns_roce_query_fw_ver(hr_dev);
1501         if (ret) {
1502                 dev_err(hr_dev->dev,
1503                         "Query firmware version failed, ret = %d.\n", ret);
1504                 return ret;
1505         }
1506
1507         ret = hns_roce_config_global_param(hr_dev);
1508         if (ret) {
1509                 dev_err(hr_dev->dev,
1510                         "Configure global param failed, ret = %d.\n", ret);
1511                 return ret;
1512         }
1513
1514         /* Get the resources owned by this PF */
1515         ret = hns_roce_query_pf_resource(hr_dev);
1516         if (ret) {
1517                 dev_err(hr_dev->dev, "Query pf resource failed, ret = %d.\n",
1518                         ret);
1519                 return ret;
1520         }
1521
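        /*
         * Revision 0x21 appears to correspond to hip08 revision B, which
         * adds the QPC/CQC timer, MW/FRMR, atomic and SRQ capabilities
         * used further below.
         */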
1522         if (hr_dev->pci_dev->revision == 0x21) {
1523                 ret = hns_roce_query_pf_timer_resource(hr_dev);
1524                 if (ret) {
1525                         dev_err(hr_dev->dev,
1526                                 "Query pf timer resource failed, ret = %d.\n",
1527                                 ret);
1528                         return ret;
1529                 }
1530         }
1531
1532         ret = hns_roce_alloc_vf_resource(hr_dev);
1533         if (ret) {
1534                 dev_err(hr_dev->dev, "Allocate vf resource failed, ret = %d.\n",
1535                         ret);
1536                 return ret;
1537         }
1538
1539         if (hr_dev->pci_dev->revision == 0x21) {
1540                 ret = hns_roce_set_vf_switch_param(hr_dev, 0);
1541                 if (ret) {
1542                         dev_err(hr_dev->dev,
1543                                 "Set function switch param failed, ret = %d.\n",
1544                                 ret);
1545                         return ret;
1546                 }
1547         }
1548
1549         hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1550         hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1551
1552         caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
1553         caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
1554         caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
1555         caps->num_srqs          = HNS_ROCE_V2_MAX_SRQ_NUM;
1556         caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1557         caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
1558         caps->max_srqwqes       = HNS_ROCE_V2_MAX_SRQWQE_NUM;
1559         caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1560         caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1561         caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1562         caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
1563         caps->max_srq_sg        = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
1564         caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
1565         caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
1566         caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
1567         caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
1568         caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1569         caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
1570         caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
1571         caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
1572         caps->num_srqwqe_segs   = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1573         caps->num_idx_segs      = HNS_ROCE_V2_MAX_IDX_SEGS;
1574         caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
1575         caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1576         caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1577         caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1578         caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1579         caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1580         caps->qpc_entry_sz      = HNS_ROCE_V2_QPC_ENTRY_SZ;
1581         caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1582         caps->trrl_entry_sz     = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1583         caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
1584         caps->srqc_entry_sz     = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1585         caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1586         caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
1587         caps->idx_entry_sz      = 4;
1588         caps->cq_entry_sz       = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1589         caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1590         caps->reserved_lkey     = 0;
1591         caps->reserved_pds      = 0;
1592         caps->reserved_mrws     = 1;
1593         caps->reserved_uars     = 0;
1594         caps->reserved_cqs      = 0;
1595         caps->reserved_srqs     = 0;
1596         caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;
1597
1598         caps->qpc_ba_pg_sz      = 0;
1599         caps->qpc_buf_pg_sz     = 0;
1600         caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1601         caps->srqc_ba_pg_sz     = 0;
1602         caps->srqc_buf_pg_sz    = 0;
1603         caps->srqc_hop_num      = HNS_ROCE_CONTEXT_HOP_NUM;
1604         caps->cqc_ba_pg_sz      = 0;
1605         caps->cqc_buf_pg_sz     = 0;
1606         caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1607         caps->mpt_ba_pg_sz      = 0;
1608         caps->mpt_buf_pg_sz     = 0;
1609         caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1610         caps->pbl_ba_pg_sz      = 2;
1611         caps->pbl_buf_pg_sz     = 0;
1612         caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
1613         caps->mtt_ba_pg_sz      = 0;
1614         caps->mtt_buf_pg_sz     = 0;
1615         caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
1616         caps->wqe_sq_hop_num    = 2;
1617         caps->wqe_sge_hop_num   = 1;
1618         caps->wqe_rq_hop_num    = 2;
1619         caps->cqe_ba_pg_sz      = 6;
1620         caps->cqe_buf_pg_sz     = 0;
1621         caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
1622         caps->srqwqe_ba_pg_sz   = 0;
1623         caps->srqwqe_buf_pg_sz  = 0;
1624         caps->srqwqe_hop_num    = HNS_ROCE_SRQWQE_HOP_NUM;
1625         caps->idx_ba_pg_sz      = 0;
1626         caps->idx_buf_pg_sz     = 0;
1627         caps->idx_hop_num       = HNS_ROCE_IDX_HOP_NUM;
1628         caps->eqe_ba_pg_sz      = 0;
1629         caps->eqe_buf_pg_sz     = 0;
1630         caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
1631         caps->tsq_buf_pg_sz     = 0;
1632         caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1633
1634         caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
1635                                   HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1636                                   HNS_ROCE_CAP_FLAG_RQ_INLINE |
1637                                   HNS_ROCE_CAP_FLAG_RECORD_DB |
1638                                   HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1639
1640         if (hr_dev->pci_dev->revision == 0x21)
1641                 caps->flags |= HNS_ROCE_CAP_FLAG_MW |
1642                                HNS_ROCE_CAP_FLAG_FRMR;
1643
1644         caps->pkey_table_len[0] = 1;
1645         caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1646         caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
1647         caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
1648         caps->local_ca_ack_delay = 0;
1649         caps->max_mtu = IB_MTU_4096;
1650
1651         caps->max_srqs          = HNS_ROCE_V2_MAX_SRQ;
1652         caps->max_srq_wrs       = HNS_ROCE_V2_MAX_SRQ_WR;
1653         caps->max_srq_sges      = HNS_ROCE_V2_MAX_SRQ_SGE;
1654
1655         if (hr_dev->pci_dev->revision == 0x21) {
1656                 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
1657                                HNS_ROCE_CAP_FLAG_SRQ |
1658                                HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1659
1660                 caps->num_qpc_timer       = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1661                 caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1662                 caps->qpc_timer_ba_pg_sz  = 0;
1663                 caps->qpc_timer_buf_pg_sz = 0;
1664                 caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1665                 caps->num_cqc_timer       = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1666                 caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1667                 caps->cqc_timer_ba_pg_sz  = 0;
1668                 caps->cqc_timer_buf_pg_sz = 0;
1669                 caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1670
1671                 caps->sccc_entry_sz     = HNS_ROCE_V2_SCCC_ENTRY_SZ;
1672                 caps->sccc_ba_pg_sz     = 0;
1673                 caps->sccc_buf_pg_sz    = 0;
1674                 caps->sccc_hop_num      = HNS_ROCE_SCCC_HOP_NUM;
1675         }
1676
1677         ret = hns_roce_v2_set_bt(hr_dev);
1678         if (ret)
1679                 dev_err(hr_dev->dev,
1680                         "Configure bt attribute failed, ret = %d.\n", ret);
1681
1682         return ret;
1683 }
1684
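/*
 * Program a link table into the HW: the extended link list (TSQ) or
 * the timeout link list (TPQ). Descriptor A carries the table base
 * address, depth, page size and head entry; descriptor B carries the
 * tail entry and tail pointer.
 */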
1685 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1686                                       enum hns_roce_link_table_type type)
1687 {
1688         struct hns_roce_cmq_desc desc[2];
1689         struct hns_roce_cfg_llm_a *req_a =
1690                                 (struct hns_roce_cfg_llm_a *)desc[0].data;
1691         struct hns_roce_cfg_llm_b *req_b =
1692                                 (struct hns_roce_cfg_llm_b *)desc[1].data;
1693         struct hns_roce_v2_priv *priv = hr_dev->priv;
1694         struct hns_roce_link_table *link_tbl;
1695         struct hns_roce_link_table_entry *entry;
1696         enum hns_roce_opcode_type opcode;
1697         u32 page_num;
1698         int i;
1699
1700         switch (type) {
1701         case TSQ_LINK_TABLE:
1702                 link_tbl = &priv->tsq;
1703                 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1704                 break;
1705         case TPQ_LINK_TABLE:
1706                 link_tbl = &priv->tpq;
1707                 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1708                 break;
1709         default:
1710                 return -EINVAL;
1711         }
1712
1713         page_num = link_tbl->npages;
1714         entry = link_tbl->table.buf;
1715         memset(req_a, 0, sizeof(*req_a));
1716         memset(req_b, 0, sizeof(*req_b));
1717
1718         for (i = 0; i < 2; i++) {
1719                 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1720
1721                 if (i == 0)
1722                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1723                 else
1724                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1725
1726                 if (i == 0) {
1727                         req_a->base_addr_l =
1728                                 cpu_to_le32(link_tbl->table.map & 0xffffffff);
1729                         req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
1730                         roce_set_field(req_a->depth_pgsz_init_en,
1731                                        CFG_LLM_QUE_DEPTH_M,
1732                                        CFG_LLM_QUE_DEPTH_S,
1733                                        link_tbl->npages);
1734                         roce_set_field(req_a->depth_pgsz_init_en,
1735                                        CFG_LLM_QUE_PGSZ_M,
1736                                        CFG_LLM_QUE_PGSZ_S,
1737                                        link_tbl->pg_sz);
1738                         req_a->head_ba_l = entry[0].blk_ba0;
1739                         req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
1740                         roce_set_field(req_a->head_ptr,
1741                                        CFG_LLM_HEAD_PTR_M,
1742                                        CFG_LLM_HEAD_PTR_S, 0);
1743                 } else {
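                        /*
                         * The tail pointer below reads entry[page_num - 2],
                         * so hns_roce_init_link_table() must have built a
                         * table of at least two pages (pg_num_b >= 2
                         * guarantees that).
                         */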
1744                         req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
1745                         roce_set_field(req_b->tail_ba_h,
1746                                        CFG_LLM_TAIL_BA_H_M,
1747                                        CFG_LLM_TAIL_BA_H_S,
1748                                        entry[page_num - 1].blk_ba1_nxt_ptr &
1749                                        HNS_ROCE_LINK_TABLE_BA1_M);
1750                         roce_set_field(req_b->tail_ptr,
1751                                        CFG_LLM_TAIL_PTR_M,
1752                                        CFG_LLM_TAIL_PTR_S,
1753                                        (entry[page_num - 2].blk_ba1_nxt_ptr &
1754                                        HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1755                                        HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1756                 }
1757         }
1758         roce_set_field(req_a->depth_pgsz_init_en,
1759                        CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1760
1761         return hns_roce_cmq_send(hr_dev, desc, 2);
1762 }
1763
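/*
 * Allocate and chain the DMA pages behind a link table. Each table
 * entry records a page base address (bits 12..43 in blk_ba0, the bits
 * above those in blk_ba1) and the index of the next entry, forming a
 * singly linked list the HW can walk. The page count is the larger of
 * a size derived from the QP/CQ counts and a per-function minimum.
 */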
1764 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1765                                     enum hns_roce_link_table_type type)
1766 {
1767         struct hns_roce_v2_priv *priv = hr_dev->priv;
1768         struct hns_roce_link_table *link_tbl;
1769         struct hns_roce_link_table_entry *entry;
1770         struct device *dev = hr_dev->dev;
1771         u32 buf_chk_sz;
1772         dma_addr_t t;
1773         int func_num = 1;
1774         int pg_num_a;
1775         int pg_num_b;
1776         int pg_num;
1777         int size;
1778         int i;
1779
1780         switch (type) {
1781         case TSQ_LINK_TABLE:
1782                 link_tbl = &priv->tsq;
1783                 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1784                 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1785                 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1786                 break;
1787         case TPQ_LINK_TABLE:
1788                 link_tbl = &priv->tpq;
1789                 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1790                 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1791                 pg_num_b = 2 * 4 * func_num + 2;
1792                 break;
1793         default:
1794                 return -EINVAL;
1795         }
1796
1797         pg_num = max(pg_num_a, pg_num_b);
1798         size = pg_num * sizeof(struct hns_roce_link_table_entry);
1799
1800         link_tbl->table.buf = dma_alloc_coherent(dev, size,
1801                                                  &link_tbl->table.map,
1802                                                  GFP_KERNEL);
1803         if (!link_tbl->table.buf)
1804                 goto out;
1805
1806         link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1807                                     GFP_KERNEL);
1808         if (!link_tbl->pg_list)
1809                 goto err_kcalloc_failed;
1810
1811         entry = link_tbl->table.buf;
1812         for (i = 0; i < pg_num; ++i) {
1813                 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1814                                                               &t, GFP_KERNEL);
1815                 if (!link_tbl->pg_list[i].buf)
1816                         goto err_alloc_buf_failed;
1817
1818                 link_tbl->pg_list[i].map = t;
1819
1820                 entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1821                 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1822                                HNS_ROCE_LINK_TABLE_BA1_M,
1823                                HNS_ROCE_LINK_TABLE_BA1_S,
1824                                t >> 44);
1825
1826                 if (i < (pg_num - 1))
1827                         roce_set_field(entry[i].blk_ba1_nxt_ptr,
1828                                        HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1829                                        HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1830                                        i + 1);
1831         }
1832         link_tbl->npages = pg_num;
1833         link_tbl->pg_sz = buf_chk_sz;
1834
1835         return hns_roce_config_link_table(hr_dev, type);
1836
1837 err_alloc_buf_failed:
1838         for (i -= 1; i >= 0; i--)
1839                 dma_free_coherent(dev, buf_chk_sz,
1840                                   link_tbl->pg_list[i].buf,
1841                                   link_tbl->pg_list[i].map);
1842         kfree(link_tbl->pg_list);
1843
1844 err_kcalloc_failed:
1845         dma_free_coherent(dev, size, link_tbl->table.buf,
1846                           link_tbl->table.map);
1847
1848 out:
1849         return -ENOMEM;
1850 }
1851
1852 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1853                                      struct hns_roce_link_table *link_tbl)
1854 {
1855         struct device *dev = hr_dev->dev;
1856         int size;
1857         int i;
1858
1859         size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1860
1861         for (i = 0; i < link_tbl->npages; ++i)
1862                 if (link_tbl->pg_list[i].buf)
1863                         dma_free_coherent(dev, link_tbl->pg_sz,
1864                                           link_tbl->pg_list[i].buf,
1865                                           link_tbl->pg_list[i].map);
1866         kfree(link_tbl->pg_list);
1867
1868         dma_free_coherent(dev, size, link_tbl->table.buf,
1869                           link_tbl->table.map);
1870 }
1871
1872 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1873 {
1874         struct hns_roce_v2_priv *priv = hr_dev->priv;
1875         int qpc_count, cqc_count;
1876         int ret, i;
1877
1878         /* TSQ includes SQ doorbell and ack doorbell */
1879         ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1880         if (ret) {
1881                 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1882                 return ret;
1883         }
1884
1885         ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1886         if (ret) {
1887                 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1888                 goto err_tpq_init_failed;
1889         }
1890
1891         /* Alloc memory for QPC Timer buffer space chunk */
1892         for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
1893              qpc_count++) {
1894                 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
1895                                          qpc_count);
1896                 if (ret) {
1897                         dev_err(hr_dev->dev, "QPC Timer get failed\n");
1898                         goto err_qpc_timer_failed;
1899                 }
1900         }
1901
1902         /* Alloc memory for CQC Timer buffer space chunk */
1903         for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
1904              cqc_count++) {
1905                 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
1906                                          cqc_count);
1907                 if (ret) {
1908                         dev_err(hr_dev->dev, "CQC Timer get failed\n");
1909                         goto err_cqc_timer_failed;
1910                 }
1911         }
1912
1913         return 0;
1914
1915 err_cqc_timer_failed:
1916         for (i = 0; i < cqc_count; i++)
1917                 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
1918
1919 err_qpc_timer_failed:
1920         for (i = 0; i < qpc_count; i++)
1921                 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
1922
1923         hns_roce_free_link_table(hr_dev, &priv->tpq);
1924
1925 err_tpq_init_failed:
1926         hns_roce_free_link_table(hr_dev, &priv->tsq);
1927
1928         return ret;
1929 }
1930
1931 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1932 {
1933         struct hns_roce_v2_priv *priv = hr_dev->priv;
1934
1935         if (hr_dev->pci_dev->revision == 0x21)
1936                 hns_roce_function_clear(hr_dev);
1937
1938         hns_roce_free_link_table(hr_dev, &priv->tpq);
1939         hns_roce_free_link_table(hr_dev, &priv->tsq);
1940 }
1941
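/*
 * Mailbox commands are tunneled through the command queue. The status
 * word returned by HNS_ROCE_OPC_QUERY_MB_ST carries the completion
 * status in its low bits and a "HW still running" flag above
 * HNS_ROCE_HW_RUN_BIT_SHIFT; the two helpers below extract each part.
 */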
1942 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
1943 {
1944         struct hns_roce_cmq_desc desc;
1945         struct hns_roce_mbox_status *mb_st =
1946                                        (struct hns_roce_mbox_status *)desc.data;
1947         enum hns_roce_cmd_return_status status;
1948
1949         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
1950
1951         status = hns_roce_cmq_send(hr_dev, &desc, 1);
1952         if (status)
1953                 return status;
1954
1955         return le32_to_cpu(mb_st->mb_status_hw_run);
1956 }
1957
1958 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1959 {
1960         u32 status = hns_roce_query_mbox_status(hr_dev);
1961
1962         return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1963 }
1964
1965 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1966 {
1967         u32 status = hns_roce_query_mbox_status(hr_dev);
1968
1969         return status & HNS_ROCE_HW_MB_STATUS_MASK;
1970 }
1971
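/*
 * Pack a mailbox command into a single CMQ descriptor: the 64-bit
 * in/out parameters are split into low/high 32-bit words, the input
 * modifier and opcode share cmd_tag, and the token and event-enable
 * bits share token_event_en.
 */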
1972 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
1973                               u64 out_param, u32 in_modifier, u8 op_modifier,
1974                               u16 op, u16 token, int event)
1975 {
1976         struct hns_roce_cmq_desc desc;
1977         struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
1978
1979         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
1980
1981         mb->in_param_l = cpu_to_le32(in_param);
1982         mb->in_param_h = cpu_to_le32(in_param >> 32);
1983         mb->out_param_l = cpu_to_le32(out_param);
1984         mb->out_param_h = cpu_to_le32(out_param >> 32);
1985         mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
1986         mb->token_event_en = cpu_to_le32(event << 16 | token);
1987
1988         return hns_roce_cmq_send(hr_dev, &desc, 1);
1989 }
1990
1991 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1992                                  u64 out_param, u32 in_modifier, u8 op_modifier,
1993                                  u16 op, u16 token, int event)
1994 {
1995         struct device *dev = hr_dev->dev;
1996         unsigned long end;
1997         int ret;
1998
1999         end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2000         while (hns_roce_v2_cmd_pending(hr_dev)) {
2001                 if (time_after(jiffies, end)) {
2002                         dev_dbg(dev, "jiffies=%lu end=%lu\n", jiffies, end);
2004                         return -EAGAIN;
2005                 }
2006                 cond_resched();
2007         }
2008
2009         ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2010                                  op_modifier, op, token, event);
2011         if (ret)
2012                 dev_err(dev, "Post mailbox fail(%d)\n", ret);
2013
2014         return ret;
2015 }
2016
2017 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2018                                 unsigned long timeout)
2019 {
2020         struct device *dev = hr_dev->dev;
2021         unsigned long end;
2022         u32 status;
2023
2024         end = msecs_to_jiffies(timeout) + jiffies;
2025         while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2026                 cond_resched();
2027
2028         if (hns_roce_v2_cmd_pending(hr_dev)) {
2029                 dev_err(dev, "[cmd_poll] hw run cmd timeout!\n");
2030                 return -ETIMEDOUT;
2031         }
2032
2033         status = hns_roce_v2_cmd_complete(hr_dev);
2034         if (status != 0x1) {
2035                 if (status == CMD_RST_PRC_EBUSY)
2036                         return status;
2037
2038                 dev_err(dev, "mailbox status 0x%x!\n", status);
2039                 return -EBUSY;
2040         }
2041
2042         return 0;
2043 }
2044
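/*
 * Write one SGID table entry: the 128-bit GID is split into four
 * 32-bit words, stored together with the table index and the SGID type
 * (RoCEv1, RoCEv2 over IPv4 or RoCEv2 over IPv6).
 */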
2045 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2046                                       int gid_index, const union ib_gid *gid,
2047                                       enum hns_roce_sgid_type sgid_type)
2048 {
2049         struct hns_roce_cmq_desc desc;
2050         struct hns_roce_cfg_sgid_tb *sgid_tb =
2051                                     (struct hns_roce_cfg_sgid_tb *)desc.data;
2052         u32 *p;
2053
2054         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2055
2056         roce_set_field(sgid_tb->table_idx_rsv,
2057                        CFG_SGID_TB_TABLE_IDX_M,
2058                        CFG_SGID_TB_TABLE_IDX_S, gid_index);
2059         roce_set_field(sgid_tb->vf_sgid_type_rsv,
2060                        CFG_SGID_TB_VF_SGID_TYPE_M,
2061                        CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2062
2063         p = (u32 *)&gid->raw[0];
2064         sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2065
2066         p = (u32 *)&gid->raw[4];
2067         sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2068
2069         p = (u32 *)&gid->raw[8];
2070         sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2071
2072         p = (u32 *)&gid->raw[0xc];
2073         sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2074
2075         return hns_roce_cmq_send(hr_dev, &desc, 1);
2076 }
2077
2078 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2079                                int gid_index, const union ib_gid *gid,
2080                                const struct ib_gid_attr *attr)
2081 {
2082         enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2083         int ret;
2084
2085         if (!gid || !attr)
2086                 return -EINVAL;
2087
2088         if (attr->gid_type == IB_GID_TYPE_ROCE)
2089                 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2090
2091         if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2092                 if (ipv6_addr_v4mapped((void *)gid))
2093                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2094                 else
2095                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2096         }
2097
2098         ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2099         if (ret)
2100                 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2101
2102         return ret;
2103 }
2104
2105 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2106                                u8 *addr)
2107 {
2108         struct hns_roce_cmq_desc desc;
2109         struct hns_roce_cfg_smac_tb *smac_tb =
2110                                     (struct hns_roce_cfg_smac_tb *)desc.data;
2111         u16 reg_smac_h;
2112         u32 reg_smac_l;
2113
2114         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2115
2116         reg_smac_l = *(u32 *)(&addr[0]);
2117         reg_smac_h = *(u16 *)(&addr[4]);
2118
2119         memset(smac_tb, 0, sizeof(*smac_tb));
2120         roce_set_field(smac_tb->tb_idx_rsv,
2121                        CFG_SMAC_TB_IDX_M,
2122                        CFG_SMAC_TB_IDX_S, phy_port);
2123         roce_set_field(smac_tb->vf_smac_h_rsv,
2124                        CFG_SMAC_TB_VF_SMAC_H_M,
2125                        CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2126         smac_tb->vf_smac_l = reg_smac_l;
2127
2128         return hns_roce_cmq_send(hr_dev, &desc, 1);
2129 }
2130
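/*
 * Fill the PBL fields of an MPT entry: PBL size and base address, plus
 * the first two page addresses copied inline into the entry
 * (HNS_ROCE_V2_MAX_INNER_MTPT_NUM of them). The page addresses are
 * stored shifted right by 6 bits, which presumably matches the
 * granularity the HW expects for inline PAs.
 */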
2131 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2132                         struct hns_roce_mr *mr)
2133 {
2134         struct sg_dma_page_iter sg_iter;
2135         u64 page_addr;
2136         u64 *pages;
2137         int i;
2138
2139         mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2140         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2141         roce_set_field(mpt_entry->byte_48_mode_ba,
2142                        V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2143                        upper_32_bits(mr->pbl_ba >> 3));
2144
2145         pages = (u64 *)__get_free_page(GFP_KERNEL);
2146         if (!pages)
2147                 return -ENOMEM;
2148
2149         i = 0;
2150         for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2151                 page_addr = sg_page_iter_dma_address(&sg_iter);
2152                 pages[i] = page_addr >> 6;
2153
2154                 /* Record the first 2 entries directly in the MTPT table */
2155                 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2156                         goto found;
2157                 i++;
2158         }
2159 found:
2160         mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2161         roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2162                        V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2163
2164         mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2165         roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2166                        V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2167         roce_set_field(mpt_entry->byte_64_buf_pa1,
2168                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2169                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2170                        mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2171
2172         free_page((unsigned long)pages);
2173
2174         return 0;
2175 }
2176
2177 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2178                                   unsigned long mtpt_idx)
2179 {
2180         struct hns_roce_v2_mpt_entry *mpt_entry;
2181         int ret;
2182
2183         mpt_entry = mb_buf;
2184         memset(mpt_entry, 0, sizeof(*mpt_entry));
2185
2186         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2187                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2188         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2189                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2190                        HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2191         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2192                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2193                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2194                        mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2195         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2196                        V2_MPT_BYTE_4_PD_S, mr->pd);
2197
2198         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2199         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2200         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2201         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2202                      (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2203         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2204                      mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2205         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2206                      (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2207         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2208                      (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2209         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2210                      (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2211
2212         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2213                      mr->type == MR_TYPE_MR ? 0 : 1);
2214         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2215                      1);
2216
2217         mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2218         mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2219         mpt_entry->lkey = cpu_to_le32(mr->key);
2220         mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2221         mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2222
2223         if (mr->type == MR_TYPE_DMA)
2224                 return 0;
2225
2226         ret = set_mtpt_pbl(mpt_entry, mr);
2227
2228         return ret;
2229 }
2230
2231 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2232                                         struct hns_roce_mr *mr, int flags,
2233                                         u32 pdn, int mr_access_flags, u64 iova,
2234                                         u64 size, void *mb_buf)
2235 {
2236         struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2237         int ret = 0;
2238
2239         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2240                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2241
2242         if (flags & IB_MR_REREG_PD) {
2243                 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2244                                V2_MPT_BYTE_4_PD_S, pdn);
2245                 mr->pd = pdn;
2246         }
2247
2248         if (flags & IB_MR_REREG_ACCESS) {
2249                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2250                              V2_MPT_BYTE_8_BIND_EN_S,
2251                              (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2252                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2253                              V2_MPT_BYTE_8_ATOMIC_EN_S,
2254                              mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2255                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2256                              mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2257                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2258                              mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2259                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2260                              mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2261         }
2262
2263         if (flags & IB_MR_REREG_TRANS) {
2264                 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2265                 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2266                 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2267                 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2268
2269                 mr->iova = iova;
2270                 mr->size = size;
2271
2272                 ret = set_mtpt_pbl(mpt_entry, mr);
2273         }
2274
2275         return ret;
2276 }
2277
2278 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2279 {
2280         struct hns_roce_v2_mpt_entry *mpt_entry;
2281
2282         mpt_entry = mb_buf;
2283         memset(mpt_entry, 0, sizeof(*mpt_entry));
2284
2285         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2286                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2287         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2288                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2289         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2290                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2291                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2292                        mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2293         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2294                        V2_MPT_BYTE_4_PD_S, mr->pd);
2295
2296         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2297         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2298         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2299
2300         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2301         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2302         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2303         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2304
2305         mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2306
2307         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2308         roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2309                        V2_MPT_BYTE_48_PBL_BA_H_S,
2310                        upper_32_bits(mr->pbl_ba >> 3));
2311
2312         roce_set_field(mpt_entry->byte_64_buf_pa1,
2313                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2314                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2315                        mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2316
2317         return 0;
2318 }
2319
2320 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2321 {
2322         struct hns_roce_v2_mpt_entry *mpt_entry;
2323
2324         mpt_entry = mb_buf;
2325         memset(mpt_entry, 0, sizeof(*mpt_entry));
2326
2327         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2328                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2329         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2330                        V2_MPT_BYTE_4_PD_S, mw->pdn);
2331         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2332                        V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2333                        V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2334                        mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
2335                        0 : mw->pbl_hop_num);
2336         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2337                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2338                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2339                        mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2340
2341         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2342         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2343
2344         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2345         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2346         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2347         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2348                      mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2349
2350         roce_set_field(mpt_entry->byte_64_buf_pa1,
2351                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2352                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2353                        mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2354
2355         mpt_entry->lkey = cpu_to_le32(mw->rkey);
2356
2357         return 0;
2358 }
2359
2360 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2361 {
2362         return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2363                                    n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2364 }
2365
2366 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2367 {
2368         struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2369
2370         /* Get the CQE whose owner bit differs from the MSB of cons_idx */
2371         return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2372                 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2373 }
2374
2375 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2376 {
2377         return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2378 }
2379
2380 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2381 {
2382         return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2383 }
2384
2385 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2386 {
2387         /* always called with interrupts disabled. */
2388         spin_lock(&srq->lock);
2389
2390         bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2391         srq->tail++;
2392
2393         spin_unlock(&srq->lock);
2394 }
2395
2396 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2397 {
2398         *hr_cq->set_ci_db = cons_index & 0xffffff;
2399 }
2400
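/*
 * Drop every CQE that belongs to the given QP from the CQ, shifting
 * the surviving entries over the freed slots while preserving each
 * destination slot's owner bit, and return any SRQ WQEs referenced by
 * the dropped entries.
 */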
2401 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2402                                    struct hns_roce_srq *srq)
2403 {
2404         struct hns_roce_v2_cqe *cqe, *dest;
2405         u32 prod_index;
2406         int nfreed = 0;
2407         int wqe_index;
2408         u8 owner_bit;
2409
2410         for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2411              ++prod_index) {
2412                 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2413                         break;
2414         }
2415
2416         /*
2417          * Now backwards through the CQ, removing CQ entries
2418          * that match our QP by overwriting them with next entries.
2419          */
2420         while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2421                 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2422                 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2423                                     V2_CQE_BYTE_16_LCL_QPN_S) &
2424                                     HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2425                         if (srq &&
2426                             roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2427                                 wqe_index = roce_get_field(cqe->byte_4,
2428                                                      V2_CQE_BYTE_4_WQE_INDX_M,
2429                                                      V2_CQE_BYTE_4_WQE_INDX_S);
2430                                 hns_roce_free_srq_wqe(srq, wqe_index);
2431                         }
2432                         ++nfreed;
2433                 } else if (nfreed) {
2434                         dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2435                                           hr_cq->ib_cq.cqe);
2436                         owner_bit = roce_get_bit(dest->byte_4,
2437                                                  V2_CQE_BYTE_4_OWNER_S);
2438                         memcpy(dest, cqe, sizeof(*cqe));
2439                         roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2440                                      owner_bit);
2441                 }
2442         }
2443
2444         if (nfreed) {
2445                 hr_cq->cons_index += nfreed;
2446                 /*
2447                  * Make sure update of buffer contents is done before
2448                  * updating consumer index.
2449                  */
2450                 wmb();
2451                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2452         }
2453 }
2454
2455 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2456                                  struct hns_roce_srq *srq)
2457 {
2458         spin_lock_irq(&hr_cq->lock);
2459         __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2460         spin_unlock_irq(&hr_cq->lock);
2461 }
2462
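/*
 * Build the CQ context written to the HW: ring state, shift (log2 of
 * the CQE count) and completion EQ number, the current/next CQE block
 * addresses taken from the MTT, the page-size/hop-num layout, the CQE
 * base address (stored shifted right by 3) and the record doorbell
 * address.
 */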
2463 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2464                                   struct hns_roce_cq *hr_cq, void *mb_buf,
2465                                   u64 *mtts, dma_addr_t dma_handle, int nent,
2466                                   u32 vector)
2467 {
2468         struct hns_roce_v2_cq_context *cq_context;
2469
2470         cq_context = mb_buf;
2471         memset(cq_context, 0, sizeof(*cq_context));
2472
2473         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2474                        V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2475         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2476                        V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2477         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2478                        V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
2479         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2480                        V2_CQC_BYTE_4_CEQN_S, vector);
2481         cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
2482
2483         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2484                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2485
2486         cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
2487         cq_context->cqe_cur_blk_addr =
2488                                 cpu_to_le32(cq_context->cqe_cur_blk_addr);
2489
2490         roce_set_field(cq_context->byte_16_hop_addr,
2491                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2492                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2493                        cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
2494         roce_set_field(cq_context->byte_16_hop_addr,
2495                        V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2496                        V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2497                        HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2498
2499         cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
2500         roce_set_field(cq_context->byte_24_pgsz_addr,
2501                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2502                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2503                        cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
2504         roce_set_field(cq_context->byte_24_pgsz_addr,
2505                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2506                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2507                        hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2508         roce_set_field(cq_context->byte_24_pgsz_addr,
2509                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2510                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2511                        hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2512
2513         cq_context->cqe_ba = (u32)(dma_handle >> 3);
2514
2515         roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2516                        V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2517
2518         if (hr_cq->db_en)
2519                 roce_set_bit(cq_context->byte_44_db_record,
2520                              V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2521
2522         roce_set_field(cq_context->byte_44_db_record,
2523                        V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2524                        V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2525                        ((u32)hr_cq->db.dma) >> 1);
2526         cq_context->db_record_addr = hr_cq->db.dma >> 32;
2527
2528         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2529                        V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2530                        V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2531                        HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2532         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2533                        V2_CQC_BYTE_56_CQ_PERIOD_M,
2534                        V2_CQC_BYTE_56_CQ_PERIOD_S,
2535                        HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2536 }
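
/*
 * Worked example for the address fields above (illustrative values,
 * assuming PAGE_ADDR_SHIFT = 12): a CQE block at mtts[0] =
 * 0x123456789000 is stored as the low part (u32)(mtts[0] >> 12) =
 * 0x23456789 plus the high part mtts[0] >> (32 + 12) = 0x1, and the
 * base address register drops its low 3 bits via dma_handle >> 3.
 */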
2537
2538 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2539                                      enum ib_cq_notify_flags flags)
2540 {
2541         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2542         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2543         u32 notification_flag;
2544         u32 doorbell[2];
2545
2546         doorbell[0] = 0;
2547         doorbell[1] = 0;
2548
2549         notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2550                              V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2551         /*
2552          * flags = 0, notification flag = 1: notify for the next completion
2553          * flags = 1, notification flag = 0: notify for solicited only
2554          */
2555         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2556                        hr_cq->cqn);
2557         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2558                        HNS_ROCE_V2_CQ_DB_NTR);
2559         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2560                        V2_CQ_DB_PARAMETER_CONS_IDX_S,
2561                        hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2562         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2563                        V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2564         roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2565                      notification_flag);
2566
2567         hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2568
2569         return 0;
2570 }
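
/*
 * Usage sketch (hypothetical consumer, standard verbs API): a ULP arms
 * the CQ and then re-polls to drain completions that raced in before
 * the arm took effect; handle() is a placeholder:
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle(&wc);
 */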
2571
2572 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2573                                                     struct hns_roce_qp **cur_qp,
2574                                                     struct ib_wc *wc)
2575 {
2576         struct hns_roce_rinl_sge *sge_list;
2577         u32 wr_num, wr_cnt, sge_num;
2578         u32 sge_cnt, data_len, size;
2579         void *wqe_buf;
2580
2581         wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2582                                 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2583         wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2584
2585         sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2586         sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2587         wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2588         data_len = wc->byte_len;
2589
2590         for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2591                 size = min(sge_list[sge_cnt].len, data_len);
2592                 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2593
2594                 data_len -= size;
2595                 wqe_buf += size;
2596         }
2597
2598         if (data_len) {
2599                 wc->status = IB_WC_LOC_LEN_ERR;
2600                 return -EAGAIN;
2601         }
2602
2603         return 0;
2604 }
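
/*
 * Worked example for the scatter loop above (illustrative): a 100-byte
 * inline message landing in SGEs of 64 + 64 bytes copies min(64, 100) =
 * 64 bytes, then min(64, 36) = 36 bytes; data_len reaches 0, so the
 * message fits and no IB_WC_LOC_LEN_ERR is reported.
 */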
2605
2606 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2607                                 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2608 {
2609         struct hns_roce_srq *srq = NULL;
2610         struct hns_roce_dev *hr_dev;
2611         struct hns_roce_v2_cqe *cqe;
2612         struct hns_roce_qp *hr_qp;
2613         struct hns_roce_wq *wq;
2614         struct ib_qp_attr attr;
2615         int attr_mask;
2616         int is_send;
2617         u16 wqe_ctr;
2618         u32 opcode;
2619         u32 status;
2620         int qpn;
2621         int ret;
2622
2623         /* Find cqe according to consumer index */
2624         cqe = next_cqe_sw_v2(hr_cq);
2625         if (!cqe)
2626                 return -EAGAIN;
2627
2628         ++hr_cq->cons_index;
2629         /* Memory barrier */
2630         rmb();
2631
2632         /* 0->SQ, 1->RQ */
2633         is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2634
2635         qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2636                                 V2_CQE_BYTE_16_LCL_QPN_S);
2637
2638         if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2639                 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2640                 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2641                 if (unlikely(!hr_qp)) {
2642                         dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2643                                 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2644                         return -EINVAL;
2645                 }
2646                 *cur_qp = hr_qp;
2647         }
2648
2649         wc->qp = &(*cur_qp)->ibqp;
2650         wc->vendor_err = 0;
2651
2652         if (is_send) {
2653                 wq = &(*cur_qp)->sq;
2654                 if ((*cur_qp)->sq_signal_bits) {
2655                         /*
2656                          * If sq_signal_bits is set, first update
2657                          * the tail pointer to the WQE that the
2658                          * current CQE corresponds to.
2659                          */
2660                         wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2661                                                       V2_CQE_BYTE_4_WQE_INDX_M,
2662                                                       V2_CQE_BYTE_4_WQE_INDX_S);
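                        /*
                         * Catch the tail up to the signalled WQE modulo
                         * the queue size (illustrative: wqe_cnt = 8,
                         * tail = 6, wqe_ctr = 1 gives (1 - 6) & 7 = 3,
                         * so tail becomes 9, i.e. slot 1).
                         */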
2663                         wq->tail += (wqe_ctr - (u16)wq->tail) &
2664                                     (wq->wqe_cnt - 1);
2665                 }
2666
2667                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2668                 ++wq->tail;
2669         } else if ((*cur_qp)->ibqp.srq) {
2670                 srq = to_hr_srq((*cur_qp)->ibqp.srq);
2671                 wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
2672                                                      V2_CQE_BYTE_4_WQE_INDX_M,
2673                                                      V2_CQE_BYTE_4_WQE_INDX_S));
2674                 wc->wr_id = srq->wrid[wqe_ctr];
2675                 hns_roce_free_srq_wqe(srq, wqe_ctr);
2676         } else {
2677                 /* Update tail pointer, record wr_id */
2678                 wq = &(*cur_qp)->rq;
2679                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2680                 ++wq->tail;
2681         }
2682
2683         status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2684                                 V2_CQE_BYTE_4_STATUS_S);
2685         switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2686         case HNS_ROCE_CQE_V2_SUCCESS:
2687                 wc->status = IB_WC_SUCCESS;
2688                 break;
2689         case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2690                 wc->status = IB_WC_LOC_LEN_ERR;
2691                 break;
2692         case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2693                 wc->status = IB_WC_LOC_QP_OP_ERR;
2694                 break;
2695         case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2696                 wc->status = IB_WC_LOC_PROT_ERR;
2697                 break;
2698         case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2699                 wc->status = IB_WC_WR_FLUSH_ERR;
2700                 break;
2701         case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2702                 wc->status = IB_WC_MW_BIND_ERR;
2703                 break;
2704         case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2705                 wc->status = IB_WC_BAD_RESP_ERR;
2706                 break;
2707         case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2708                 wc->status = IB_WC_LOC_ACCESS_ERR;
2709                 break;
2710         case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2711                 wc->status = IB_WC_REM_INV_REQ_ERR;
2712                 break;
2713         case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2714                 wc->status = IB_WC_REM_ACCESS_ERR;
2715                 break;
2716         case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2717                 wc->status = IB_WC_REM_OP_ERR;
2718                 break;
2719         case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2720                 wc->status = IB_WC_RETRY_EXC_ERR;
2721                 break;
2722         case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2723                 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2724                 break;
2725         case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2726                 wc->status = IB_WC_REM_ABORT_ERR;
2727                 break;
2728         default:
2729                 wc->status = IB_WC_GENERAL_ERR;
2730                 break;
2731         }
2732
2733         /* flush cqe if wc status is error, excluding flush error */
2734         if ((wc->status != IB_WC_SUCCESS) &&
2735             (wc->status != IB_WC_WR_FLUSH_ERR)) {
2736                 attr_mask = IB_QP_STATE;
2737                 attr.qp_state = IB_QPS_ERR;
2738                 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2739                                              &attr, attr_mask,
2740                                              (*cur_qp)->state, IB_QPS_ERR);
2741         }
2742
2743         if (wc->status == IB_WC_WR_FLUSH_ERR)
2744                 return 0;
2745
2746         if (is_send) {
2747                 wc->wc_flags = 0;
2748                 /* SQ opcode corresponding to this CQE */
2749                 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2750                                        V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2751                 case HNS_ROCE_SQ_OPCODE_SEND:
2752                         wc->opcode = IB_WC_SEND;
2753                         break;
2754                 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2755                         wc->opcode = IB_WC_SEND;
2756                         break;
2757                 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2758                         wc->opcode = IB_WC_SEND;
2759                         wc->wc_flags |= IB_WC_WITH_IMM;
2760                         break;
2761                 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2762                         wc->opcode = IB_WC_RDMA_READ;
2763                         wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2764                         break;
2765                 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2766                         wc->opcode = IB_WC_RDMA_WRITE;
2767                         break;
2768                 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2769                         wc->opcode = IB_WC_RDMA_WRITE;
2770                         wc->wc_flags |= IB_WC_WITH_IMM;
2771                         break;
2772                 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2773                         wc->opcode = IB_WC_LOCAL_INV;
2774                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2775                         break;
2776                 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2777                         wc->opcode = IB_WC_COMP_SWAP;
2778                         wc->byte_len  = 8;
2779                         break;
2780                 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2781                         wc->opcode = IB_WC_FETCH_ADD;
2782                         wc->byte_len  = 8;
2783                         break;
2784                 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2785                         wc->opcode = IB_WC_MASKED_COMP_SWAP;
2786                         wc->byte_len  = 8;
2787                         break;
2788                 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2789                         wc->opcode = IB_WC_MASKED_FETCH_ADD;
2790                         wc->byte_len  = 8;
2791                         break;
2792                 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2793                         wc->opcode = IB_WC_REG_MR;
2794                         break;
2795                 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2796                         wc->opcode = IB_WC_REG_MR;
2797                         break;
2798                 default:
2799                         wc->status = IB_WC_GENERAL_ERR;
2800                         break;
2801                 }
2802         } else {
2803                 /* RQ WQE corresponding to this CQE */
2804                 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2805
2806                 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2807                                         V2_CQE_BYTE_4_OPCODE_S);
2808                 switch (opcode & 0x1f) {
2809                 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2810                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2811                         wc->wc_flags = IB_WC_WITH_IMM;
2812                         wc->ex.imm_data =
2813                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2814                         break;
2815                 case HNS_ROCE_V2_OPCODE_SEND:
2816                         wc->opcode = IB_WC_RECV;
2817                         wc->wc_flags = 0;
2818                         break;
2819                 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2820                         wc->opcode = IB_WC_RECV;
2821                         wc->wc_flags = IB_WC_WITH_IMM;
2822                         wc->ex.imm_data =
2823                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2824                         break;
2825                 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2826                         wc->opcode = IB_WC_RECV;
2827                         wc->wc_flags = IB_WC_WITH_INVALIDATE;
2828                         wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2829                         break;
2830                 default:
2831                         wc->status = IB_WC_GENERAL_ERR;
2832                         break;
2833                 }
2834
2835                 if ((wc->qp->qp_type == IB_QPT_RC ||
2836                      wc->qp->qp_type == IB_QPT_UC) &&
2837                     (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2838                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2839                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2840                     (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2841                         ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2842                         if (ret)
2843                                 return -EAGAIN;
2844                 }
2845
2846                 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2847                                             V2_CQE_BYTE_32_SL_S);
2848                 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2849                                                 V2_CQE_BYTE_32_RMT_QPN_M,
2850                                                 V2_CQE_BYTE_32_RMT_QPN_S);
2851                 wc->slid = 0;
2852                 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2853                                               V2_CQE_BYTE_32_GRH_S) ?
2854                                               IB_WC_GRH : 0);
2855                 wc->port_num = roce_get_field(cqe->byte_32,
2856                                 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2857                 wc->pkey_index = 0;
2858                 memcpy(wc->smac, cqe->smac, 4);
2859                 wc->smac[4] = roce_get_field(cqe->byte_28,
2860                                              V2_CQE_BYTE_28_SMAC_4_M,
2861                                              V2_CQE_BYTE_28_SMAC_4_S);
2862                 wc->smac[5] = roce_get_field(cqe->byte_28,
2863                                              V2_CQE_BYTE_28_SMAC_5_M,
2864                                              V2_CQE_BYTE_28_SMAC_5_S);
2865                 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2866                         wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2867                                                           V2_CQE_BYTE_28_VID_M,
2868                                                           V2_CQE_BYTE_28_VID_S);
2869                 } else {
2870                         wc->vlan_id = 0xffff;
2871                 }
2872
2873                 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2874                 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2875                                                     V2_CQE_BYTE_28_PORT_TYPE_M,
2876                                                     V2_CQE_BYTE_28_PORT_TYPE_S);
2877         }
2878
2879         return 0;
2880 }
2881
2882 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2883                                struct ib_wc *wc)
2884 {
2885         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2886         struct hns_roce_qp *cur_qp = NULL;
2887         unsigned long flags;
2888         int npolled;
2889
2890         spin_lock_irqsave(&hr_cq->lock, flags);
2891
2892         for (npolled = 0; npolled < num_entries; ++npolled) {
2893                 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2894                         break;
2895         }
2896
2897         if (npolled) {
2898                 /* Memory barrier */
2899                 wmb();
2900                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2901         }
2902
2903         spin_unlock_irqrestore(&hr_cq->lock, flags);
2904
2905         return npolled;
2906 }
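
/*
 * Usage sketch (hypothetical): polling in batches lets the consumer
 * index doorbell written at the end of hns_roce_v2_poll_cq() cover up
 * to the whole batch at once; handle() is a placeholder:
 *
 *	struct ib_wc wc[16];
 *	int i, n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *
 *	for (i = 0; i < n; i++)
 *		handle(&wc[i]);
 */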
2907
2908 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2909                                struct hns_roce_hem_table *table, int obj,
2910                                int step_idx)
2911 {
2912         struct device *dev = hr_dev->dev;
2913         struct hns_roce_cmd_mailbox *mailbox;
2914         struct hns_roce_hem_iter iter;
2915         struct hns_roce_hem_mhop mhop;
2916         struct hns_roce_hem *hem;
2917         unsigned long mhop_obj = obj;
2918         int i, j, k;
2919         int ret = 0;
2920         u64 hem_idx = 0;
2921         u64 l1_idx = 0;
2922         u64 bt_ba = 0;
2923         u32 chunk_ba_num;
2924         u32 hop_num;
2925         u16 op = 0xff;
2926
2927         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2928                 return 0;
2929
2930         hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2931         i = mhop.l0_idx;
2932         j = mhop.l1_idx;
2933         k = mhop.l2_idx;
2934         hop_num = mhop.hop_num;
2935         chunk_ba_num = mhop.bt_chunk_size / 8;
2936
2937         if (hop_num == 2) {
2938                 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2939                           k;
2940                 l1_idx = i * chunk_ba_num + j;
2941         } else if (hop_num == 1) {
2942                 hem_idx = i * chunk_ba_num + j;
2943         } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2944                 hem_idx = i;
2945         }
2946
2947         switch (table->type) {
2948         case HEM_TYPE_QPC:
2949                 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2950                 break;
2951         case HEM_TYPE_MTPT:
2952                 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2953                 break;
2954         case HEM_TYPE_CQC:
2955                 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2956                 break;
2957         case HEM_TYPE_SRQC:
2958                 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2959                 break;
2960         case HEM_TYPE_SCCC:
2961                 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
2962                 break;
2963         case HEM_TYPE_QPC_TIMER:
2964                 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
2965                 break;
2966         case HEM_TYPE_CQC_TIMER:
2967                 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
2968                 break;
2969         default:
2970                 dev_warn(dev, "Table %d not to be written by mailbox!\n",
2971                          table->type);
2972                 return 0;
2973         }
2974
2975         if (table->type == HEM_TYPE_SCCC && step_idx)
2976                 return 0;
2977
2978         op += step_idx;
2979
2980         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2981         if (IS_ERR(mailbox))
2982                 return PTR_ERR(mailbox);
2983
2984         if (table->type == HEM_TYPE_SCCC)
2985                 obj = mhop.l0_idx;
2986
2987         if (check_whether_last_step(hop_num, step_idx)) {
2988                 hem = table->hem[hem_idx];
2989                 for (hns_roce_hem_first(hem, &iter);
2990                      !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2991                         bt_ba = hns_roce_hem_addr(&iter);
2992
2993                         /* configure the ba, tag, and op */
2994                         ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2995                                                 obj, 0, op,
2996                                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
2997                 }
2998         } else {
2999                 if (step_idx == 0)
3000                         bt_ba = table->bt_l0_dma_addr[i];
3001                 else if (step_idx == 1 && hop_num == 2)
3002                         bt_ba = table->bt_l1_dma_addr[l1_idx];
3003
3004                 /* configure the ba, tag, and op */
3005                 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3006                                         0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3007         }
3008
3009         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3010         return ret;
3011 }
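
/*
 * Worked example for the hem_idx arithmetic above (illustrative,
 * assuming a 4 KB BT chunk): chunk_ba_num = 4096 / 8 = 512 entries, so
 * for hop_num == 2 and (i, j, k) = (1, 2, 3), hem_idx = 1 * 512 * 512 +
 * 2 * 512 + 3 = 263171 and l1_idx = 1 * 512 + 2 = 514.
 */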
3012
3013 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3014                                  struct hns_roce_hem_table *table, int obj,
3015                                  int step_idx)
3016 {
3017         struct device *dev = hr_dev->dev;
3018         struct hns_roce_cmd_mailbox *mailbox;
3019         int ret;
3020         u16 op = 0xff;
3021
3022         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3023                 return 0;
3024
3025         switch (table->type) {
3026         case HEM_TYPE_QPC:
3027                 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3028                 break;
3029         case HEM_TYPE_MTPT:
3030                 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3031                 break;
3032         case HEM_TYPE_CQC:
3033                 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3034                 break;
3035         case HEM_TYPE_SCCC:
3036         case HEM_TYPE_QPC_TIMER:
3037         case HEM_TYPE_CQC_TIMER:
3038                 break;
3039         case HEM_TYPE_SRQC:
3040                 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3041                 break;
3042         default:
3043                 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3044                          table->type);
3045                 return 0;
3046         }
3047
3048         if (table->type == HEM_TYPE_SCCC ||
3049             table->type == HEM_TYPE_QPC_TIMER ||
3050             table->type == HEM_TYPE_CQC_TIMER)
3051                 return 0;
3052
3053         op += step_idx;
3054
3055         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3056         if (IS_ERR(mailbox))
3057                 return PTR_ERR(mailbox);
3058
3059         /* configure the tag and op */
3060         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3061                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3062
3063         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3064         return ret;
3065 }
3066
3067 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3068                                  enum ib_qp_state cur_state,
3069                                  enum ib_qp_state new_state,
3070                                  struct hns_roce_v2_qp_context *context,
3071                                  struct hns_roce_qp *hr_qp)
3072 {
3073         struct hns_roce_cmd_mailbox *mailbox;
3074         int ret;
3075
3076         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3077         if (IS_ERR(mailbox))
3078                 return PTR_ERR(mailbox);
3079
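        /* The buffer carries the context followed by its mask, hence * 2. */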
3080         memcpy(mailbox->buf, context, sizeof(*context) * 2);
3081
3082         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3083                                 HNS_ROCE_CMD_MODIFY_QPC,
3084                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3085
3086         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3087
3088         return ret;
3089 }
3090
3091 static void set_access_flags(struct hns_roce_qp *hr_qp,
3092                              struct hns_roce_v2_qp_context *context,
3093                              struct hns_roce_v2_qp_context *qpc_mask,
3094                              const struct ib_qp_attr *attr, int attr_mask)
3095 {
3096         u8 dest_rd_atomic;
3097         u32 access_flags;
3098
3099         dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3100                          attr->max_dest_rd_atomic : hr_qp->resp_depth;
3101
3102         access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3103                        attr->qp_access_flags : hr_qp->atomic_rd_en;
3104
3105         if (!dest_rd_atomic)
3106                 access_flags &= IB_ACCESS_REMOTE_WRITE;
3107
3108         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3109                      !!(access_flags & IB_ACCESS_REMOTE_READ));
3110         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3111
3112         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3113                      !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3114         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3115
3116         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3117                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3118         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3119 }
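
/*
 * A minimal sketch (not driver code) of the context/mask convention used
 * by set_access_flags() and the modify_qp_* helpers below: the caller
 * pre-fills the mask with all ones, and writing a field means setting its
 * value in the context while clearing the same bits in the mask.
 */
static inline void qpc_field_write_sketch(u32 *ctx, u32 *mask,
                                          u32 field, u32 val)
{
        *ctx |= val & field;    /* new value into the context */
        *mask &= ~field;        /* mask bits of 0 mean "apply this field" */
}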
3120
3121 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3122                                     const struct ib_qp_attr *attr,
3123                                     int attr_mask,
3124                                     struct hns_roce_v2_qp_context *context,
3125                                     struct hns_roce_v2_qp_context *qpc_mask)
3126 {
3127         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3128         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3129
3130         /*
3131          * In the v2 engine, software passes both the context and a context
3132          * mask to hardware when modifying a QP. To modify a field of the
3133          * context, software must clear all bits of that field in the context
3134          * mask at the same time; fields whose mask bits stay 0x1 are unchanged.
3135          */
3136         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3137                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3138         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3139                        V2_QPC_BYTE_4_TST_S, 0);
3140
3141         if (ibqp->qp_type == IB_QPT_GSI)
3142                 roce_set_field(context->byte_4_sqpn_tst,
3143                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3144                                V2_QPC_BYTE_4_SGE_SHIFT_S,
3145                                ilog2((unsigned int)hr_qp->sge.sge_cnt));
3146         else
3147                 roce_set_field(context->byte_4_sqpn_tst,
3148                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3149                                V2_QPC_BYTE_4_SGE_SHIFT_S,
3150                                hr_qp->sq.max_gs > 2 ?
3151                                ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3152
3153         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3154                        V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3155
3156         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3157                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3158         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3159                        V2_QPC_BYTE_4_SQPN_S, 0);
3160
3161         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3162                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3163         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3164                        V2_QPC_BYTE_16_PD_S, 0);
3165
3166         roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3167                        V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3168         roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3169                        V2_QPC_BYTE_20_RQWS_S, 0);
3170
3171         roce_set_field(context->byte_20_smac_sgid_idx,
3172                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3173                        ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3174         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3175                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3176
3177         roce_set_field(context->byte_20_smac_sgid_idx,
3178                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3179                        (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3180                        hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
3181                        ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3182         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3183                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3184
3185         /* Without a VLAN, the VLAN ID field must be set to 0xFFF */
3186         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3187                        V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3188         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3189                        V2_QPC_BYTE_24_VLAN_ID_S, 0);
3190
3191         /*
3192          * Clear some fields in the context: because every field of the
3193          * context defaults to zero, we need not write 0 to them again,
3194          * but the relevant fields of the context mask must be set to 0.
3195          */
3196         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3197         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3198         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3199         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3200
3201         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3202                        V2_QPC_BYTE_60_TEMPID_S, 0);
3203
3204         roce_set_field(qpc_mask->byte_60_qpst_tempid,
3205                        V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3206                        0);
3207         roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3208                      V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3209         roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3210                      V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3211         roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3212         roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3213
3214         if (hr_qp->rdb_en) {
3215                 roce_set_bit(context->byte_68_rq_db,
3216                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3217                 roce_set_bit(qpc_mask->byte_68_rq_db,
3218                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3219         }
3220
3221         roce_set_field(context->byte_68_rq_db,
3222                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3223                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3224                        ((u32)hr_qp->rdb.dma) >> 1);
3225         roce_set_field(qpc_mask->byte_68_rq_db,
3226                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3227                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3228         context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
3229         qpc_mask->rq_db_record_addr = 0;
3230
3231         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3232                     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3233         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3234
3235         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3236                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3237         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3238                        V2_QPC_BYTE_80_RX_CQN_S, 0);
3239         if (ibqp->srq) {
3240                 roce_set_field(context->byte_76_srqn_op_en,
3241                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3242                                to_hr_srq(ibqp->srq)->srqn);
3243                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3244                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3245                 roce_set_bit(context->byte_76_srqn_op_en,
3246                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3247                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3248                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
3249         }
3250
3251         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3252                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3253                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3254         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3255                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3256                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3257
3258         roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3259                        V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3260
3261         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3262                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3263
3264         roce_set_field(qpc_mask->byte_104_rq_sge,
3265                        V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3266                        V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3267
3268         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3269                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3270         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3271                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3272                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3273         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3274                      V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3275
3276         qpc_mask->rq_rnr_timer = 0;
3277         qpc_mask->rx_msg_len = 0;
3278         qpc_mask->rx_rkey_pkt_info = 0;
3279         qpc_mask->rx_va = 0;
3280
3281         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3282                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3283         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3284                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3285
3286         roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3287                      0);
3288         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3289                        V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3290         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3291                        V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3292
3293         roce_set_field(qpc_mask->byte_144_raq,
3294                        V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3295                        V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3296         roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3297                        V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3298         roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3299
3300         roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3301                        V2_QPC_BYTE_148_RQ_MSN_S, 0);
3302         roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3303                        V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3304
3305         roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3306                        V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3307         roce_set_field(qpc_mask->byte_152_raq,
3308                        V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3309                        V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3310
3311         roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3312                        V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3313
3314         roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3315                        V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3316                        V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3317         roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3318                        V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3319                        V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3320
3321         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3322                      V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3323         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3324                      V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3325         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3326                      V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3327         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3328                      V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3329         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3330                      V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3331         roce_set_field(qpc_mask->byte_168_irrl_idx,
3332                        V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3333                        V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3334
3335         roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3336                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3337         roce_set_field(qpc_mask->byte_172_sq_psn,
3338                        V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3339                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3340
3341         roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3342                      0);
3343
3344         roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3345         roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3346
3347         roce_set_field(qpc_mask->byte_176_msg_pktn,
3348                        V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3349                        V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3350         roce_set_field(qpc_mask->byte_176_msg_pktn,
3351                        V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3352                        V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3353
3354         roce_set_field(qpc_mask->byte_184_irrl_idx,
3355                        V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3356                        V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3357
3358         qpc_mask->cur_sge_offset = 0;
3359
3360         roce_set_field(qpc_mask->byte_192_ext_sge,
3361                        V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3362                        V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3363         roce_set_field(qpc_mask->byte_192_ext_sge,
3364                        V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3365                        V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3366
3367         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3368                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3369
3370         roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3371                        V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3372         roce_set_field(qpc_mask->byte_200_sq_max,
3373                        V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3374                        V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3375
3376         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3377         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3378
3379         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3380                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3381
3382         qpc_mask->sq_timer = 0;
3383
3384         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3385                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3386                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3387         roce_set_field(qpc_mask->byte_232_irrl_sge,
3388                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3389                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3390
3391         roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3392                      0);
3393         roce_set_bit(qpc_mask->byte_232_irrl_sge,
3394                      V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3395         roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3396                      0);
3397
3398         qpc_mask->irrl_cur_sge_offset = 0;
3399
3400         roce_set_field(qpc_mask->byte_240_irrl_tail,
3401                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3402                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3403         roce_set_field(qpc_mask->byte_240_irrl_tail,
3404                        V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3405                        V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3406         roce_set_field(qpc_mask->byte_240_irrl_tail,
3407                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
3408                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3409
3410         roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3411                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3412         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3413                      0);
3414         roce_set_field(qpc_mask->byte_248_ack_psn,
3415                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3416                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3417         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3418                      0);
3419         roce_set_bit(qpc_mask->byte_248_ack_psn,
3420                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3421         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3422                      0);
3423
3424         hr_qp->access_flags = attr->qp_access_flags;
3425         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3426                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3427         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3428                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3429
3430         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3431                        V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3432
3433         roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3434                        V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3435                        V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3436         roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3437                        V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3438                        V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3439 }
3440
3441 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3442                                    const struct ib_qp_attr *attr, int attr_mask,
3443                                    struct hns_roce_v2_qp_context *context,
3444                                    struct hns_roce_v2_qp_context *qpc_mask)
3445 {
3446         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3447
3448         /*
3449          * In the v2 engine, software passes both the context and a context
3450          * mask to hardware when modifying a QP. To modify a field of the
3451          * context, software must clear all bits of that field in the context
3452          * mask at the same time; fields whose mask bits stay 0x1 are unchanged.
3453          */
3454         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3455                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3456         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3457                        V2_QPC_BYTE_4_TST_S, 0);
3458
3459         if (ibqp->qp_type == IB_QPT_GSI)
3460                 roce_set_field(context->byte_4_sqpn_tst,
3461                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3462                                V2_QPC_BYTE_4_SGE_SHIFT_S,
3463                                ilog2((unsigned int)hr_qp->sge.sge_cnt));
3464         else
3465                 roce_set_field(context->byte_4_sqpn_tst,
3466                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3467                                V2_QPC_BYTE_4_SGE_SHIFT_S,
3468                                hr_qp->sq.max_gs >
3469                                HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3470                                ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3471
3472         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3473                        V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3474
3475         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3476                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3477                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3478                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3479                              0);
3480
3481                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3482                              !!(attr->qp_access_flags &
3483                              IB_ACCESS_REMOTE_WRITE));
3484                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3485                              0);
3486
3487                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3488                              !!(attr->qp_access_flags &
3489                              IB_ACCESS_REMOTE_ATOMIC));
3490                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3491                              0);
3492         } else {
3493                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3494                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3495                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3496                              0);
3497
3498                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3499                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3500                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3501                              0);
3502
3503                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3504                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3505                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3506                              0);
3507         }
3508
3509         roce_set_field(context->byte_20_smac_sgid_idx,
3510                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3511                        ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3512         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3513                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3514
3515         roce_set_field(context->byte_20_smac_sgid_idx,
3516                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3517                        (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3518                        hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
3519                        ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3520         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3521                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3522
3523         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3524                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3525         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3526                        V2_QPC_BYTE_16_PD_S, 0);
3527
3528         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3529                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3530         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3531                        V2_QPC_BYTE_80_RX_CQN_S, 0);
3532
3533         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3534                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3535         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3536                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3537
3538         if (ibqp->srq) {
3539                 roce_set_bit(context->byte_76_srqn_op_en,
3540                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3541                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3542                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
3543                 roce_set_field(context->byte_76_srqn_op_en,
3544                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3545                                to_hr_srq(ibqp->srq)->srqn);
3546                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3547                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3548         }
3549
3550         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3551                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3552         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3553                        V2_QPC_BYTE_4_SQPN_S, 0);
3554
3555         if (attr_mask & IB_QP_DEST_QPN) {
3556                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3557                                V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3558                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3559                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3560         }
3561 }
3562
3563 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3564                                    struct hns_roce_qp *hr_qp, int mtt_cnt,
3565                                    u32 page_size)
3566 {
3567         struct device *dev = hr_dev->dev;
3568
3569         if (hr_qp->rq.wqe_cnt < 1)
3570                 return true;
3571
3572         if (mtt_cnt < 1) {
3573                 dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3574                         hr_qp->qpn);
3575                 return false;
3576         }
3577
3578         if (mtt_cnt < MTT_MIN_COUNT &&
3579                 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3580                 dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3581                         hr_qp->qpn);
3582                 return false;
3583         }
3584
3585         return true;
3586 }
3587
3588 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3589                                  const struct ib_qp_attr *attr, int attr_mask,
3590                                  struct hns_roce_v2_qp_context *context,
3591                                  struct hns_roce_v2_qp_context *qpc_mask)
3592 {
3593         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3594         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3595         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3596         struct device *dev = hr_dev->dev;
3597         u64 mtts[MTT_MIN_COUNT] = { 0 };
3598         dma_addr_t dma_handle_3;
3599         dma_addr_t dma_handle_2;
3600         u64 wqe_sge_ba;
3601         u32 page_size;
3602         u8 port_num;
3603         u64 *mtts_3;
3604         u64 *mtts_2;
3605         int count;
3606         u8 *dmac;
3607         u8 *smac;
3608         int port;
3609
3610         /* Search qp buf's mtts */
3611         page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3612         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3613                                   hr_qp->rq.offset / page_size, mtts,
3614                                   MTT_MIN_COUNT, &wqe_sge_ba);
3615         if (!ibqp->srq)
3616                 if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3617                         return -EINVAL;
3618
3619         /* Search IRRL's mtts */
3620         mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3621                                      hr_qp->qpn, &dma_handle_2);
3622         if (!mtts_2) {
3623                 dev_err(dev, "qp irrl_table find failed\n");
3624                 return -EINVAL;
3625         }
3626
3627         /* Search TRRL's mtts */
3628         mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3629                                      hr_qp->qpn, &dma_handle_3);
3630         if (!mtts_3) {
3631                 dev_err(dev, "qp trrl_table find failed\n");
3632                 return -EINVAL;
3633         }
3634
3635         if (attr_mask & IB_QP_ALT_PATH) {
3636                 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3637                 return -EINVAL;
3638         }
3639
3640         dmac = (u8 *)attr->ah_attr.roce.dmac;
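        /*
         * The WQE/SGE base address is stored in units of 8 bytes: the low
         * 32 bits of the shifted value go into wqe_sge_ba, the remaining
         * bits into byte_12 below.
         */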
3641         context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3);
3642         qpc_mask->wqe_sge_ba = 0;
3643
3644         /*
3645          * In the v2 engine, software passes the context and the context
3646          * mask to hardware when modifying a qp. For every field to be
3647          * modified, all bits of that field in the context mask must be
3648          * cleared to 0, while untouched fields keep their mask bits at 0x1.
3649          */
3650         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3651                        V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3652         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3653                        V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3654
3655         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3656                        V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3657                        hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3658                        0 : hr_dev->caps.wqe_sq_hop_num);
3659         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3660                        V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3661
3662         roce_set_field(context->byte_20_smac_sgid_idx,
3663                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3664                        V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3665                        ((ibqp->qp_type == IB_QPT_GSI) ||
3666                        hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3667                        hr_dev->caps.wqe_sge_hop_num : 0);
3668         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3669                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3670                        V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3671
3672         roce_set_field(context->byte_20_smac_sgid_idx,
3673                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3674                        V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3675                        hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3676                        0 : hr_dev->caps.wqe_rq_hop_num);
3677         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3678                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3679                        V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3680
3681         roce_set_field(context->byte_16_buf_ba_pg_sz,
3682                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3683                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3684                        hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
3685         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3686                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3687                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3688
3689         roce_set_field(context->byte_16_buf_ba_pg_sz,
3690                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3691                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3692                        hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3693         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3694                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3695                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3696
3697         context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3698         qpc_mask->rq_cur_blk_addr = 0;
3699
3700         roce_set_field(context->byte_92_srq_info,
3701                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3702                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3703                        mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3704         roce_set_field(qpc_mask->byte_92_srq_info,
3705                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3706                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3707
3708         context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
3709         qpc_mask->rq_nxt_blk_addr = 0;
3710
3711         roce_set_field(context->byte_104_rq_sge,
3712                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3713                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3714                        mtts[1] >> (32 + PAGE_ADDR_SHIFT));
3715         roce_set_field(qpc_mask->byte_104_rq_sge,
3716                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3717                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3718
3719         roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3720                        V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3721         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3722                        V2_QPC_BYTE_132_TRRL_BA_S, 0);
3723         context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3724         qpc_mask->trrl_ba = 0;
3725         roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3726                        V2_QPC_BYTE_140_TRRL_BA_S,
3727                        (u32)(dma_handle_3 >> (32 + 16 + 4)));
3728         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3729                        V2_QPC_BYTE_140_TRRL_BA_S, 0);
3730
3731         context->irrl_ba = (u32)(dma_handle_2 >> 6);
3732         qpc_mask->irrl_ba = 0;
3733         roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3734                        V2_QPC_BYTE_208_IRRL_BA_S,
3735                        dma_handle_2 >> (32 + 6));
3736         roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3737                        V2_QPC_BYTE_208_IRRL_BA_S, 0);
3738
3739         roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3740         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3741
3742         roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3743                      hr_qp->sq_signal_bits);
3744         roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3745                      0);
3746
3747         port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3748
3749         smac = (u8 *)hr_dev->dev_addr[port];
3750         /* when dmac equals smac or loop_idc is 1, the packet loops back */
3751         if (ether_addr_equal_unaligned(dmac, smac) ||
3752             hr_dev->loop_idc == 0x1) {
3753                 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3754                 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3755         }
3756
3757         if (attr_mask & IB_QP_DEST_QPN) {
3758                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3759                                V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3760                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3761                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3762         }
3763
3764         /* Configure GID index */
3765         port_num = rdma_ah_get_port_num(&attr->ah_attr);
3766         roce_set_field(context->byte_20_smac_sgid_idx,
3767                        V2_QPC_BYTE_20_SGID_IDX_M,
3768                        V2_QPC_BYTE_20_SGID_IDX_S,
3769                        hns_get_gid_index(hr_dev, port_num - 1,
3770                                          grh->sgid_index));
3771         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3772                        V2_QPC_BYTE_20_SGID_IDX_M,
3773                        V2_QPC_BYTE_20_SGID_IDX_S, 0);
3774         memcpy(&(context->dmac), dmac, sizeof(u32));
3775         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3776                        V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3777         qpc_mask->dmac = 0;
3778         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3779                        V2_QPC_BYTE_52_DMAC_S, 0);
3780
3781         /* mtu * (2^LP_PKTN_INI) must not exceed the max message length, 64KB */
3782         roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3783                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3784         roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3785                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3786
3787         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3788                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3789                                V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3790         else if (attr_mask & IB_QP_PATH_MTU)
3791                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3792                                V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3793
3794         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3795                        V2_QPC_BYTE_24_MTU_S, 0);
3796
3797         roce_set_field(context->byte_84_rq_ci_pi,
3798                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3799                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3800         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3801                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3802                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3803
3804         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3805                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3806                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3807         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3808                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3809         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3810                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3811         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3812                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3813                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3814
3815         context->rq_rnr_timer = 0;
3816         qpc_mask->rq_rnr_timer = 0;
3817
3818         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3819                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3820         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3821                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3822
3823         /* rocee sends 2^lp_sgen_ini segments at a time */
3824         roce_set_field(context->byte_168_irrl_idx,
3825                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
3826                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3827         roce_set_field(qpc_mask->byte_168_irrl_idx,
3828                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
3829                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3830
3831         return 0;
3832 }
3833
3834 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3835                                 const struct ib_qp_attr *attr, int attr_mask,
3836                                 struct hns_roce_v2_qp_context *context,
3837                                 struct hns_roce_v2_qp_context *qpc_mask)
3838 {
3839         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3840         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3841         struct device *dev = hr_dev->dev;
3842         u64 sge_cur_blk = 0;
3843         u64 sq_cur_blk = 0;
3844         u32 page_size;
3845         int count;
3846
3847         /* Search qp buf's mtts */
3848         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3849         if (count < 1) {
3850                 dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
3851                 return -EINVAL;
3852         }
3853
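        /*
         * A nonzero sge.offset means the QP has an extended SGE area (a GSI
         * QP, or an SQ with more SGEs per WQE than fit inline), so its
         * current SGE block must be looked up as well.
         */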
3854         if (hr_qp->sge.offset) {
3855                 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3856                 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3857                                           hr_qp->sge.offset / page_size,
3858                                           &sge_cur_blk, 1, NULL);
3859                 if (count < 1) {
3860                         dev_err(dev, "qp(0x%lx) sge pa find failed\n",
3861                                 hr_qp->qpn);
3862                         return -EINVAL;
3863                 }
3864         }
3865
3866         /* Alternate path and path migration are not supported */
3867         if ((attr_mask & IB_QP_ALT_PATH) ||
3868             (attr_mask & IB_QP_PATH_MIG_STATE)) {
3869                 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3870                 return -EINVAL;
3871         }
3872
3873         /*
3874          * In the v2 engine, software passes the context and the context
3875          * mask to hardware when modifying a qp. For every field to be
3876          * modified, all bits of that field in the context mask must be
3877          * cleared to 0, while untouched fields keep their mask bits at 0x1.
3878          */
3879         context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
3880         roce_set_field(context->byte_168_irrl_idx,
3881                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3882                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3883                        sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3884         qpc_mask->sq_cur_blk_addr = 0;
3885         roce_set_field(qpc_mask->byte_168_irrl_idx,
3886                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3887                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3888
3889         context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
3890                        hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3891                        ((u32)(sge_cur_blk >>
3892                        PAGE_ADDR_SHIFT)) : 0;
3893         roce_set_field(context->byte_184_irrl_idx,
3894                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3895                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3896                        ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
3897                        HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3898                        (sge_cur_blk >>
3899                        (32 + PAGE_ADDR_SHIFT)) : 0);
3900         qpc_mask->sq_cur_sge_blk_addr = 0;
3901         roce_set_field(qpc_mask->byte_184_irrl_idx,
3902                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3903                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3904
3905         context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
3906         roce_set_field(context->byte_232_irrl_sge,
3907                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3908                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3909                        sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3910         qpc_mask->rx_sq_cur_blk_addr = 0;
3911         roce_set_field(qpc_mask->byte_232_irrl_sge,
3912                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3913                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3914
3915         /*
3916          * Clear some fields in the context. Because the default value of
3917          * every context field is already zero, only the relevant fields of
3918          * the context mask need to be cleared to 0 here.
3919          */
3920         roce_set_field(qpc_mask->byte_232_irrl_sge,
3921                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3922                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3923
3924         roce_set_field(qpc_mask->byte_240_irrl_tail,
3925                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
3926                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3927
3928         roce_set_field(qpc_mask->byte_248_ack_psn,
3929                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3930                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3931         roce_set_bit(qpc_mask->byte_248_ack_psn,
3932                      V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3933         roce_set_field(qpc_mask->byte_248_ack_psn,
3934                        V2_QPC_BYTE_248_IRRL_PSN_M,
3935                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3936
3937         roce_set_field(qpc_mask->byte_240_irrl_tail,
3938                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3939                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3940
3941         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3942                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3943                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3944
3945         roce_set_bit(qpc_mask->byte_248_ack_psn,
3946                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3947
3948         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3949                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3950
3951         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3952                        V2_QPC_BYTE_212_LSN_S, 0x100);
3953         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3954                        V2_QPC_BYTE_212_LSN_S, 0);
3955
3956         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3957                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3958
3959         return 0;
3960 }
3961
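/*
 * Transitions that hardware handles with just a QP state change: any
 * non-RESET state to ERR or RESET, RTS/SQD to RTS/SQD, and SQE to RTS.
 */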
3962 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
3963                                              enum ib_qp_state new_state)
3964 {
3966         if ((cur_state != IB_QPS_RESET &&
3967             (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
3968             ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
3969             (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
3970             (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
3971                 return true;
3972
3973         return false;
3975 }
3976
3977 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3978                                  const struct ib_qp_attr *attr,
3979                                  int attr_mask, enum ib_qp_state cur_state,
3980                                  enum ib_qp_state new_state)
3981 {
3982         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3983         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3984         struct hns_roce_v2_qp_context *context;
3985         struct hns_roce_v2_qp_context *qpc_mask;
3986         struct device *dev = hr_dev->dev;
3987         int ret = -EINVAL;
3988
3989         context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
3990         if (!context)
3991                 return -ENOMEM;
3992
3993         qpc_mask = context + 1;
3994         /*
3995          * In the v2 engine, software passes the context and the context
3996          * mask to hardware when modifying a qp. For every field to be
3997          * modified, all bits of that field in the context mask must be
3998          * cleared to 0, while untouched fields keep their mask bits at 0x1.
3999          */
4000         memset(qpc_mask, 0xff, sizeof(*qpc_mask));
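        /* RST2INIT rewrites the whole context, so clear the entire mask */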
4001         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4002                 memset(qpc_mask, 0, sizeof(*qpc_mask));
4003                 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4004                                         qpc_mask);
4005         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4006                 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4007                                        qpc_mask);
4008         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4009                 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4010                                             qpc_mask);
4011                 if (ret)
4012                         goto out;
4013         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4014                 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4015                                            qpc_mask);
4016                 if (ret)
4017                         goto out;
4018         } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4019                 /* No extra QPC fields need to be programmed here */
4020                 ;
4021         } else {
4022                 dev_err(dev, "Illegal state for QP!\n");
4023                 ret = -EINVAL;
4024                 goto out;
4025         }
4026
4027         /* When the QP state becomes ERR, SQ and RQ WQEs should be flushed */
4028         if (new_state == IB_QPS_ERR) {
4029                 roce_set_field(context->byte_160_sq_ci_pi,
4030                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4031                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4032                                hr_qp->sq.head);
4033                 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4034                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4035                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4036
4037                 if (!ibqp->srq) {
4038                         roce_set_field(context->byte_84_rq_ci_pi,
4039                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4040                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4041                                hr_qp->rq.head);
4042                         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4043                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4044                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4045                 }
4046         }
4047
4048         if (attr_mask & IB_QP_AV) {
4049                 const struct ib_global_route *grh =
4050                                             rdma_ah_read_grh(&attr->ah_attr);
4051                 const struct ib_gid_attr *gid_attr = NULL;
4052                 int is_roce_protocol;
4053                 u16 vlan = 0xffff;
4054                 u8 ib_port;
4055                 u8 hr_port;
4056
4057                 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
4058                            hr_qp->port + 1;
4059                 hr_port = ib_port - 1;
4060                 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4061                                rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4062
4063                 if (is_roce_protocol) {
4064                         gid_attr = attr->ah_attr.grh.sgid_attr;
4065                         ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
4066                         if (ret)
4067                                 goto out;
4068                 }
4069
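                /*
                 * vlan stays 0xffff when the GID has no VLAN device; only
                 * IDs below VLAN_CFI_MASK enable VLAN tagging on SQ and RQ.
                 */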
4070                 if (vlan < VLAN_CFI_MASK) {
4071                         roce_set_bit(context->byte_76_srqn_op_en,
4072                                      V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4073                         roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4074                                      V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4075                         roce_set_bit(context->byte_168_irrl_idx,
4076                                      V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4077                         roce_set_bit(qpc_mask->byte_168_irrl_idx,
4078                                      V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4079                 }
4080
4081                 roce_set_field(context->byte_24_mtu_tc,
4082                                V2_QPC_BYTE_24_VLAN_ID_M,
4083                                V2_QPC_BYTE_24_VLAN_ID_S, vlan);
4084                 roce_set_field(qpc_mask->byte_24_mtu_tc,
4085                                V2_QPC_BYTE_24_VLAN_ID_M,
4086                                V2_QPC_BYTE_24_VLAN_ID_S, 0);
4087
4088                 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4089                         dev_err(hr_dev->dev,
4090                                 "sgid_index(%u) too large. max is %d\n",
4091                                 grh->sgid_index,
4092                                 hr_dev->caps.gid_table_len[hr_port]);
4093                         ret = -EINVAL;
4094                         goto out;
4095                 }
4096
4097                 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4098                         dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4099                         ret = -EINVAL;
4100                         goto out;
4101                 }
4102
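                /*
                 * For RoCEv2 (UDP-encapsulated) GIDs, program the well-known
                 * RoCE UDP port 0x12b7 (4791) as the UDP source port;
                 * RoCEv1 carries no UDP header.
                 */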
4103                 roce_set_field(context->byte_52_udpspn_dmac,
4104                            V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
4105                            (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
4106                            0 : 0x12b7);
4107
4108                 roce_set_field(qpc_mask->byte_52_udpspn_dmac,
4109                                V2_QPC_BYTE_52_UDPSPN_M,
4110                                V2_QPC_BYTE_52_UDPSPN_S, 0);
4111
4112                 roce_set_field(context->byte_20_smac_sgid_idx,
4113                                V2_QPC_BYTE_20_SGID_IDX_M,
4114                                V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
4115
4116                 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4117                                V2_QPC_BYTE_20_SGID_IDX_M,
4118                                V2_QPC_BYTE_20_SGID_IDX_S, 0);
4119
4120                 roce_set_field(context->byte_24_mtu_tc,
4121                                V2_QPC_BYTE_24_HOP_LIMIT_M,
4122                                V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4123                 roce_set_field(qpc_mask->byte_24_mtu_tc,
4124                                V2_QPC_BYTE_24_HOP_LIMIT_M,
4125                                V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4126
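                /*
                 * PCI revision 0x21 hardware expects the 6-bit DSCP value,
                 * i.e. the upper six bits of the traffic class, for RoCEv2.
                 */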
4127                 if (hr_dev->pci_dev->revision == 0x21 &&
4128                     gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4129                         roce_set_field(context->byte_24_mtu_tc,
4130                                        V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4131                                        grh->traffic_class >> 2);
4132                 else
4133                         roce_set_field(context->byte_24_mtu_tc,
4134                                        V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4135                                        grh->traffic_class);
4136                 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4137                                V2_QPC_BYTE_24_TC_S, 0);
4138                 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4139                                V2_QPC_BYTE_28_FL_S, grh->flow_label);
4140                 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4141                                V2_QPC_BYTE_28_FL_S, 0);
4142                 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4143                 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4144                 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4145                                V2_QPC_BYTE_28_SL_S,
4146                                rdma_ah_get_sl(&attr->ah_attr));
4147                 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4148                                V2_QPC_BYTE_28_SL_S, 0);
4149                 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4150         }
4151
4152         if (attr_mask & IB_QP_TIMEOUT) {
4153                 if (attr->timeout < 31) {
4154                         roce_set_field(context->byte_28_at_fl,
4155                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4156                                        attr->timeout);
4157                         roce_set_field(qpc_mask->byte_28_at_fl,
4158                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4159                                        0);
4160                 } else {
4161                         dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
4162                 }
4163         }
4164
4165         if (attr_mask & IB_QP_RETRY_CNT) {
4166                 roce_set_field(context->byte_212_lsn,
4167                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4168                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4169                                attr->retry_cnt);
4170                 roce_set_field(qpc_mask->byte_212_lsn,
4171                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4172                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4173
4174                 roce_set_field(context->byte_212_lsn,
4175                                V2_QPC_BYTE_212_RETRY_CNT_M,
4176                                V2_QPC_BYTE_212_RETRY_CNT_S,
4177                                attr->retry_cnt);
4178                 roce_set_field(qpc_mask->byte_212_lsn,
4179                                V2_QPC_BYTE_212_RETRY_CNT_M,
4180                                V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4181         }
4182
4183         if (attr_mask & IB_QP_RNR_RETRY) {
4184                 roce_set_field(context->byte_244_rnr_rxack,
4185                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4186                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4187                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4188                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4189                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4190
4191                 roce_set_field(context->byte_244_rnr_rxack,
4192                                V2_QPC_BYTE_244_RNR_CNT_M,
4193                                V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4194                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4195                                V2_QPC_BYTE_244_RNR_CNT_M,
4196                                V2_QPC_BYTE_244_RNR_CNT_S, 0);
4197         }
4198
4199         if (attr_mask & IB_QP_SQ_PSN) {
4200                 roce_set_field(context->byte_172_sq_psn,
4201                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4202                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4203                 roce_set_field(qpc_mask->byte_172_sq_psn,
4204                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4205                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4206
4207                 roce_set_field(context->byte_196_sq_psn,
4208                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4209                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4210                 roce_set_field(qpc_mask->byte_196_sq_psn,
4211                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4212                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4213
4214                 roce_set_field(context->byte_220_retry_psn_msn,
4215                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4216                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4217                 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4218                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4219                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4220
4221                 roce_set_field(context->byte_224_retry_msg,
4222                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4223                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4224                                attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4225                 roce_set_field(qpc_mask->byte_224_retry_msg,
4226                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4227                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4228
4229                 roce_set_field(context->byte_224_retry_msg,
4230                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4231                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4232                                attr->sq_psn);
4233                 roce_set_field(qpc_mask->byte_224_retry_msg,
4234                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4235                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4236
4237                 roce_set_field(context->byte_244_rnr_rxack,
4238                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4239                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4240                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4241                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4242                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4243         }
4244
4245         if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4246              attr->max_dest_rd_atomic) {
4247                 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4248                                V2_QPC_BYTE_140_RR_MAX_S,
4249                                fls(attr->max_dest_rd_atomic - 1));
4250                 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4251                                V2_QPC_BYTE_140_RR_MAX_S, 0);
4252         }
4253
4254         if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4255                 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4256                                V2_QPC_BYTE_208_SR_MAX_S,
4257                                fls(attr->max_rd_atomic - 1));
4258                 roce_set_field(qpc_mask->byte_208_irrl,
4259                                V2_QPC_BYTE_208_SR_MAX_M,
4260                                V2_QPC_BYTE_208_SR_MAX_S, 0);
4261         }
4262
4263         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4264                 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4265
4266         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4267                 roce_set_field(context->byte_80_rnr_rx_cqn,
4268                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4269                                V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4270                                attr->min_rnr_timer);
4271                 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4272                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4273                                V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4274         }
4275
4276         /* RC&UC required attr */
4277         if (attr_mask & IB_QP_RQ_PSN) {
4278                 roce_set_field(context->byte_108_rx_reqepsn,
4279                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4280                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4281                 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4282                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4283                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4284
4285                 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4286                                V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4287                 roce_set_field(qpc_mask->byte_152_raq,
4288                                V2_QPC_BYTE_152_RAQ_PSN_M,
4289                                V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4290         }
4291
4292         if (attr_mask & IB_QP_QKEY) {
4293                 context->qkey_xrcd = attr->qkey;
4294                 qpc_mask->qkey_xrcd = 0;
4295                 hr_qp->qkey = attr->qkey;
4296         }
4297
4298         roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4299                      ibqp->srq ? 1 : 0);
4300         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4301                      V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4302
4303         /* Every state transition must update the QP state field */
4304         roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4305                        V2_QPC_BYTE_60_QP_ST_S, new_state);
4306         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4307                        V2_QPC_BYTE_60_QP_ST_S, 0);
4308
4309         /* SW passes the context to HW */
4310         ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state,
4311                                     context, hr_qp);
4312         if (ret) {
4313                 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4314                 goto out;
4315         }
4316
4317         hr_qp->state = new_state;
4318
4319         if (attr_mask & IB_QP_ACCESS_FLAGS)
4320                 hr_qp->atomic_rd_en = attr->qp_access_flags;
4321
4322         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4323                 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4324         if (attr_mask & IB_QP_PORT) {
4325                 hr_qp->port = attr->port_num - 1;
4326                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4327         }
4328
4329         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4330                 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4331                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4332                 if (ibqp->send_cq != ibqp->recv_cq)
4333                         hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4334                                              hr_qp->qpn, NULL);
4335
4336                 hr_qp->rq.head = 0;
4337                 hr_qp->rq.tail = 0;
4338                 hr_qp->sq.head = 0;
4339                 hr_qp->sq.tail = 0;
4340                 hr_qp->sq_next_wqe = 0;
4341                 hr_qp->next_sge = 0;
4342                 if (hr_qp->rq.wqe_cnt)
4343                         *hr_qp->rdb.db_record = 0;
4344         }
4345
4346 out:
4347         kfree(context);
4348         return ret;
4349 }
4350
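/* Map the hardware QP state encoding to the IB verbs state, or -1 if unknown */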
4351 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4352 {
4353         switch (state) {
4354         case HNS_ROCE_QP_ST_RST:        return IB_QPS_RESET;
4355         case HNS_ROCE_QP_ST_INIT:       return IB_QPS_INIT;
4356         case HNS_ROCE_QP_ST_RTR:        return IB_QPS_RTR;
4357         case HNS_ROCE_QP_ST_RTS:        return IB_QPS_RTS;
4358         case HNS_ROCE_QP_ST_SQ_DRAINING:
4359         case HNS_ROCE_QP_ST_SQD:        return IB_QPS_SQD;
4360         case HNS_ROCE_QP_ST_SQER:       return IB_QPS_SQE;
4361         case HNS_ROCE_QP_ST_ERR:        return IB_QPS_ERR;
4362         default:                        return -1;
4363         }
4364 }
4365
4366 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4367                                  struct hns_roce_qp *hr_qp,
4368                                  struct hns_roce_v2_qp_context *hr_context)
4369 {
4370         struct hns_roce_cmd_mailbox *mailbox;
4371         int ret;
4372
4373         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4374         if (IS_ERR(mailbox))
4375                 return PTR_ERR(mailbox);
4376
4377         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4378                                 HNS_ROCE_CMD_QUERY_QPC,
4379                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
4380         if (ret) {
4381                 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4382                 goto out;
4383         }
4384
4385         memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4386
4387 out:
4388         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4389         return ret;
4390 }
4391
4392 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4393                                 int qp_attr_mask,
4394                                 struct ib_qp_init_attr *qp_init_attr)
4395 {
4396         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4397         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4398         struct hns_roce_v2_qp_context *context;
4399         struct device *dev = hr_dev->dev;
4400         int tmp_qp_state;
4401         int state;
4402         int ret;
4403
4404         context = kzalloc(sizeof(*context), GFP_KERNEL);
4405         if (!context)
4406                 return -ENOMEM;
4407
4408         memset(qp_attr, 0, sizeof(*qp_attr));
4409         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4410
4411         mutex_lock(&hr_qp->mutex);
4412
4413         if (hr_qp->state == IB_QPS_RESET) {
4414                 qp_attr->qp_state = IB_QPS_RESET;
4415                 ret = 0;
4416                 goto done;
4417         }
4418
4419         ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
4420         if (ret) {
4421                 dev_err(dev, "query qpc error\n");
4422                 ret = -EINVAL;
4423                 goto out;
4424         }
4425
4426         state = roce_get_field(context->byte_60_qpst_tempid,
4427                                V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4428         tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4429         if (tmp_qp_state == -1) {
4430                 dev_err(dev, "Illegal ib_qp_state\n");
4431                 ret = -EINVAL;
4432                 goto out;
4433         }
4434         hr_qp->state = (u8)tmp_qp_state;
4435         qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4436         qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
4437                                                         V2_QPC_BYTE_24_MTU_M,
4438                                                         V2_QPC_BYTE_24_MTU_S);
4439         qp_attr->path_mig_state = IB_MIG_ARMED;
4440         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
4441         if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4442                 qp_attr->qkey = V2_QKEY_VAL;
4443
4444         qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
4445                                          V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4446                                          V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4447         qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
4448                                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4449                                               V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4450         qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
4451                                               V2_QPC_BYTE_56_DQPN_M,
4452                                               V2_QPC_BYTE_56_DQPN_S);
4453         qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
4454                                     V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4455                                     ((roce_get_bit(context->byte_76_srqn_op_en,
4456                                     V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4457                                     ((roce_get_bit(context->byte_76_srqn_op_en,
4458                                     V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4459
4460         if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4461             hr_qp->ibqp.qp_type == IB_QPT_UC) {
4462                 struct ib_global_route *grh =
4463                                 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4464
4465                 rdma_ah_set_sl(&qp_attr->ah_attr,
4466                                roce_get_field(context->byte_28_at_fl,
4467                                               V2_QPC_BYTE_28_SL_M,
4468                                               V2_QPC_BYTE_28_SL_S));
4469                 grh->flow_label = roce_get_field(context->byte_28_at_fl,
4470                                                  V2_QPC_BYTE_28_FL_M,
4471                                                  V2_QPC_BYTE_28_FL_S);
4472                 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
4473                                                  V2_QPC_BYTE_20_SGID_IDX_M,
4474                                                  V2_QPC_BYTE_20_SGID_IDX_S);
4475                 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
4476                                                 V2_QPC_BYTE_24_HOP_LIMIT_M,
4477                                                 V2_QPC_BYTE_24_HOP_LIMIT_S);
4478                 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
4479                                                     V2_QPC_BYTE_24_TC_M,
4480                                                     V2_QPC_BYTE_24_TC_S);
4481
4482                 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
4483         }
4484
4485         qp_attr->port_num = hr_qp->port + 1;
4486         qp_attr->sq_draining = 0;
4487         qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
4488                                                      V2_QPC_BYTE_208_SR_MAX_M,
4489                                                      V2_QPC_BYTE_208_SR_MAX_S);
4490         qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
4491                                                      V2_QPC_BYTE_140_RR_MAX_M,
4492                                                      V2_QPC_BYTE_140_RR_MAX_S);
4493         qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
4494                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4495                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4496         qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
4497                                               V2_QPC_BYTE_28_AT_M,
4498                                               V2_QPC_BYTE_28_AT_S);
4499         qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
4500                                             V2_QPC_BYTE_212_RETRY_CNT_M,
4501                                             V2_QPC_BYTE_212_RETRY_CNT_S);
4502         qp_attr->rnr_retry = context->rq_rnr_timer;
4503
4504 done:
4505         qp_attr->cur_qp_state = qp_attr->qp_state;
4506         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4507         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4508
4509         if (!ibqp->uobject) {
4510                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4511                 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4512         } else {
4513                 qp_attr->cap.max_send_wr = 0;
4514                 qp_attr->cap.max_send_sge = 0;
4515         }
4516
4517         qp_init_attr->cap = qp_attr->cap;
4518
4519 out:
4520         mutex_unlock(&hr_qp->mutex);
4521         kfree(context);
4522         return ret;
4523 }
4524
4525 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4526                                          struct hns_roce_qp *hr_qp,
4527                                          struct ib_udata *udata)
4528 {
4529         struct hns_roce_cq *send_cq, *recv_cq;
4530         struct device *dev = hr_dev->dev;
4531         int ret;
4532
4533         if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4534                 /* Modify qp to reset before destroying qp */
4535                 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4536                                             hr_qp->state, IB_QPS_RESET);
4537                 if (ret) {
4538                         dev_err(dev, "modify QP %06lx to RESET failed.\n",
4539                                 hr_qp->qpn);
4540                         return ret;
4541                 }
4542         }
4543
4544         send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4545         recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4546
4547         hns_roce_lock_cqs(send_cq, recv_cq);
4548
4549         if (!udata) {
4550                 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4551                                        to_hr_srq(hr_qp->ibqp.srq) : NULL);
4552                 if (send_cq != recv_cq)
4553                         __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4554         }
4555
4556         hns_roce_qp_remove(hr_dev, hr_qp);
4557
4558         hns_roce_unlock_cqs(send_cq, recv_cq);
4559
4560         hns_roce_qp_free(hr_dev, hr_qp);
4561
4562         /* Not a special QP, free its QPN */
4563         if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4564             (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4565             (hr_qp->ibqp.qp_type == IB_QPT_UD))
4566                 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4567
4568         hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
4569
4570         if (udata) {
4571                 struct hns_roce_ucontext *context =
4572                         rdma_udata_to_drv_context(
4573                                 udata,
4574                                 struct hns_roce_ucontext,
4575                                 ibucontext);
4576
4577                 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4578                         hns_roce_db_unmap_user(context, &hr_qp->sdb);
4579
4580                 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4581                         hns_roce_db_unmap_user(context, &hr_qp->rdb);
4582         } else {
4583                 kfree(hr_qp->sq.wrid);
4584                 kfree(hr_qp->rq.wrid);
4585                 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4586                 if (hr_qp->rq.wqe_cnt)
4587                         hns_roce_free_db(hr_dev, &hr_qp->rdb);
4588         }
4589         ib_umem_release(hr_qp->umem);
4590
4591         if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4592              hr_qp->rq.wqe_cnt) {
4593                 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4594                 kfree(hr_qp->rq_inl_buf.wqe_list);
4595         }
4596
4597         return 0;
4598 }
4599
4600 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4601 {
4602         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4603         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4604         int ret;
4605
4606         ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4607         if (ret) {
4608                 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
4609                 return ret;
4610         }
4611
4612         if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4613                 kfree(hr_to_hr_sqp(hr_qp));
4614         else
4615                 kfree(hr_qp);
4616
4617         return 0;
4618 }
4619
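/*
 * Reset and clear the SCC (congestion control) context of a QP: reset the
 * hardware "clear done" flag, issue the clear for this QPN, then poll the
 * flag a bounded number of times before giving up.
 */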
4620 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4621                                                 struct hns_roce_qp *hr_qp)
4622 {
4623         struct hns_roce_sccc_clr_done *resp;
4624         struct hns_roce_sccc_clr *clr;
4625         struct hns_roce_cmq_desc desc;
4626         int ret, i;
4627
4628         mutex_lock(&hr_dev->qp_table.scc_mutex);
4629
4630         /* reset the scc ctx clear done flag */
4631         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4632         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4633         if (ret) {
4634                 dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
4635                 goto out;
4636         }
4637
4638         /* clear scc context */
4639         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4640         clr = (struct hns_roce_sccc_clr *)desc.data;
4641         clr->qpn = cpu_to_le32(hr_qp->qpn);
4642         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4643         if (ret) {
4644                 dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
4645                 goto out;
4646         }
4647
4648         /* poll until the scc context clear is done */
4649         resp = (struct hns_roce_sccc_clr_done *)desc.data;
4650         for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4651                 hns_roce_cmq_setup_basic_desc(&desc,
4652                                               HNS_ROCE_OPC_QUERY_SCCC, true);
4653                 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4654                 if (ret) {
4655                         dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
4656                         goto out;
4657                 }
4658
4659                 if (resp->clr_done)
4660                         goto out;
4661
4662                 msleep(20);
4663         }
4664
4665         dev_err(hr_dev->dev, "Query SCC clr done flag timed out.\n");
4666         ret = -ETIMEDOUT;
4667
4668 out:
4669         mutex_unlock(&hr_dev->qp_table.scc_mutex);
4670         return ret;
4671 }
4672
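/*
 * Update the CQ moderation parameters (maximum coalesced CQEs and period)
 * through a MODIFY_CQC mailbox; the second half of the mailbox buffer
 * carries the mask, mirroring the QPC context/mask scheme.
 */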
4673 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4674 {
4675         struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4676         struct hns_roce_v2_cq_context *cq_context;
4677         struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4678         struct hns_roce_v2_cq_context *cqc_mask;
4679         struct hns_roce_cmd_mailbox *mailbox;
4680         int ret;
4681
4682         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4683         if (IS_ERR(mailbox))
4684                 return PTR_ERR(mailbox);
4685
4686         cq_context = mailbox->buf;
4687         cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4688
4689         memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4690
4691         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4692                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4693                        cq_count);
4694         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4695                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4696                        0);
4697         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4698                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4699                        cq_period);
4700         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4701                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4702                        0);
4703
4704         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4705                                 HNS_ROCE_CMD_MODIFY_CQC,
4706                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
4707         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4708         if (ret)
4709                 dev_err(hr_dev->dev, "MODIFY CQ failed to send cmd mailbox.\n");
4710
4711         return ret;
4712 }
4713
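/*
 * Move a QP to the error state after a fatal async event. For user QPs,
 * first sync the SQ/RQ head pointers from the user doorbell records so
 * that the flush produces the expected CQEs.
 */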
4714 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4715 {
4716         struct hns_roce_qp *hr_qp;
4717         struct ib_qp_attr attr;
4718         int attr_mask;
4719         int ret;
4720
4721         hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4722         if (!hr_qp) {
4723                 dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4724                 return;
4725         }
4726
4727         if (hr_qp->ibqp.uobject) {
4728                 if (hr_qp->sdb_en == 1) {
4729                         hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4730                         if (hr_qp->rdb_en == 1)
4731                                 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4732                 } else {
4733                         dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4734                         return;
4735                 }
4736         }
4737
4738         attr_mask = IB_QP_STATE;
4739         attr.qp_state = IB_QPS_ERR;
4740         ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4741                                     hr_qp->state, IB_QPS_ERR);
4742         if (ret)
4743                 dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4744                         qpn);
4745 }
4746
4747 static void hns_roce_irq_work_handle(struct work_struct *work)
4748 {
4749         struct hns_roce_work *irq_work =
4750                                 container_of(work, struct hns_roce_work, work);
4751         struct device *dev = irq_work->hr_dev->dev;
4752         u32 qpn = irq_work->qpn;
4753         u32 cqn = irq_work->cqn;
4754
4755         switch (irq_work->event_type) {
4756         case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4757                 dev_info(dev, "Path migration succeeded.\n");
4758                 break;
4759         case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4760                 dev_warn(dev, "Path migration failed.\n");
4761                 break;
4762         case HNS_ROCE_EVENT_TYPE_COMM_EST:
4763                 break;
4764         case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4765                 dev_warn(dev, "Send queue drained.\n");
4766                 break;
4767         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4768                 dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
4769                         qpn, irq_work->sub_type);
4770                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4771                 break;
4772         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4773                 dev_err(dev, "Invalid request local work queue 0x%x error.\n",
4774                         qpn);
4775                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4776                 break;
4777         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4778                 dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
4779                         qpn, irq_work->sub_type);
4780                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4781                 break;
4782         case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4783                 dev_warn(dev, "SRQ limit reached.\n");
4784                 break;
4785         case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4786                 dev_warn(dev, "SRQ last wqe reached.\n");
4787                 break;
4788         case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4789                 dev_err(dev, "SRQ catas error.\n");
4790                 break;
4791         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4792                 dev_err(dev, "CQ 0x%x access err.\n", cqn);
4793                 break;
4794         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4795                 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4796                 break;
4797         case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4798                 dev_warn(dev, "DB overflow.\n");
4799                 break;
4800         case HNS_ROCE_EVENT_TYPE_FLR:
4801                 dev_warn(dev, "Function level reset.\n");
4802                 break;
4803         default:
4804                 break;
4805         }
4806
4807         kfree(irq_work);
4808 }
4809
4810 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4811                                       struct hns_roce_eq *eq,
4812                                       u32 qpn, u32 cqn)
4813 {
4814         struct hns_roce_work *irq_work;
4815
4816         irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4817         if (!irq_work)
4818                 return;
4819
4820         INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4821         irq_work->hr_dev = hr_dev;
4822         irq_work->qpn = qpn;
4823         irq_work->cqn = cqn;
4824         irq_work->event_type = eq->event_type;
4825         irq_work->sub_type = eq->sub_type;
4826         queue_work(hr_dev->irq_workq, &(irq_work->work));
4827 }
4828
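/* Ring the EQ doorbell: doorbell[0] carries the command (plus the EQN as
 * tag for completion EQs) and doorbell[1] the masked consumer index. The
 * 64-bit write acknowledges the consumed entries and re-arms the EQ
 * according to eq->arm_st.
 */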
4829 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4830 {
4831         struct hns_roce_dev *hr_dev = eq->hr_dev;
4832         u32 doorbell[2];
4833
4834         doorbell[0] = 0;
4835         doorbell[1] = 0;
4836
4837         if (eq->type_flag == HNS_ROCE_AEQ) {
4838                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4839                                HNS_ROCE_V2_EQ_DB_CMD_S,
4840                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4841                                HNS_ROCE_EQ_DB_CMD_AEQ :
4842                                HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4843         } else {
4844                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4845                                HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4846
4847                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4848                                HNS_ROCE_V2_EQ_DB_CMD_S,
4849                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4850                                HNS_ROCE_EQ_DB_CMD_CEQ :
4851                                HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4852         }
4853
4854         roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4855                        HNS_ROCE_V2_EQ_DB_PARA_S,
4856                        (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4857
4858         hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4859 }
4860
4861 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4862 {
4863         u32 buf_chk_sz;
4864         unsigned long off;
4865
4866         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4867         off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4868
4869         return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4870                 off % buf_chk_sz);
4871 }
4872
4873 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4874 {
4875         u32 buf_chk_sz;
4876         unsigned long off;
4877
4878         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4879
4880         off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4881
4882         if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4883                 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4884                         off % buf_chk_sz);
4885         else
4886                 return (struct hns_roce_aeqe *)((u8 *)
4887                         (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4888 }
4889
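/* Peek at the AEQE at the current consumer index. The owner bit of each
 * entry flips on every pass through the ring, so an entry is new exactly
 * when its owner bit differs from the consumer's pass parity, i.e. from
 * bit log2(entries) of eq->cons_index.
 */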
4890 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4891 {
4892         struct hns_roce_aeqe *aeqe;
4893
4894         if (!eq->hop_num)
4895                 aeqe = get_aeqe_v2(eq, eq->cons_index);
4896         else
4897                 aeqe = mhop_get_aeqe(eq, eq->cons_index);
4898
4899         return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4900                 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4901 }
4902
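/* Drain the asynchronous EQ: dispatch each valid AEQE to the QP, SRQ, CQ
 * or command-completion handler, then queue deferred logging and error
 * handling through hns_roce_v2_init_irq_work(). Returns nonzero when at
 * least one entry was consumed, which the MSI-X handler maps to
 * IRQ_HANDLED.
 */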
4903 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4904                                struct hns_roce_eq *eq)
4905 {
4906         struct device *dev = hr_dev->dev;
4907         struct hns_roce_aeqe *aeqe;
4908         int aeqe_found = 0;
4909         int event_type;
4910         int sub_type;
4911         u32 srqn;
4912         u32 qpn;
4913         u32 cqn;
4914
4915         while ((aeqe = next_aeqe_sw_v2(eq))) {
4916
4917                 /* Make sure we read AEQ entry after we have checked the
4918                  * ownership bit
4919                  */
4920                 dma_rmb();
4921
4922                 event_type = roce_get_field(aeqe->asyn,
4923                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4924                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4925                 sub_type = roce_get_field(aeqe->asyn,
4926                                           HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4927                                           HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4928                 qpn = roce_get_field(aeqe->event.qp_event.qp,
4929                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4930                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4931                 cqn = roce_get_field(aeqe->event.cq_event.cq,
4932                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4933                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4934                 srqn = roce_get_field(aeqe->event.srq_event.srq,
4935                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4936                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4937
4938                 switch (event_type) {
4939                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4940                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4941                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4942                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4943                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4944                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4945                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4946                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4947                         hns_roce_qp_event(hr_dev, qpn, event_type);
4948                         break;
4949                 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4950                 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4951                         hns_roce_srq_event(hr_dev, srqn, event_type);
4952                         break;
4953                 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4954                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4955                         hns_roce_cq_event(hr_dev, cqn, event_type);
4956                         break;
4957                 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4958                         break;
4959                 case HNS_ROCE_EVENT_TYPE_MB:
4960                         hns_roce_cmd_event(hr_dev,
4961                                         le16_to_cpu(aeqe->event.cmd.token),
4962                                         aeqe->event.cmd.status,
4963                                         le64_to_cpu(aeqe->event.cmd.out_param));
4964                         break;
4965                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4966                         break;
4967                 case HNS_ROCE_EVENT_TYPE_FLR:
4968                         break;
4969                 default:
4970                         dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4971                                 event_type, eq->eqn, eq->cons_index);
4972                         break;
4973                 }
4974
4975                 eq->event_type = event_type;
4976                 eq->sub_type = sub_type;
4977                 ++eq->cons_index;
4978                 aeqe_found = 1;
4979
4980                 if (eq->cons_index > (2 * eq->entries - 1)) {
4981                         dev_warn(dev, "cons_index overflow, set back to 0.\n");
4982                         eq->cons_index = 0;
4983                 }
4984                 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
4985         }
4986
4987         set_eq_cons_index_v2(eq);
4988         return aeqe_found;
4989 }
4990
4991 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4992 {
4993         u32 buf_chk_sz;
4994         unsigned long off;
4995
4996         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4997         off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4998
4999         return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5000                 off % buf_chk_sz);
5001 }
5002
5003 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5004 {
5005         u32 buf_chk_sz;
5006         unsigned long off;
5007
5008         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5009
5010         off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5011
5012         if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5013                 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5014                         off % buf_chk_sz);
5015         else
5016                 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5017                         buf_chk_sz]) + off % buf_chk_sz);
5018 }
5019
5020 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5021 {
5022         struct hns_roce_ceqe *ceqe;
5023
5024         if (!eq->hop_num)
5025                 ceqe = get_ceqe_v2(eq, eq->cons_index);
5026         else
5027                 ceqe = mhop_get_ceqe(eq, eq->cons_index);
5028
5029         return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S)) ^
5030                 !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5031 }
5032
5033 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5034                                struct hns_roce_eq *eq)
5035 {
5036         struct device *dev = hr_dev->dev;
5037         struct hns_roce_ceqe *ceqe;
5038         int ceqe_found = 0;
5039         u32 cqn;
5040
5041         while ((ceqe = next_ceqe_sw_v2(eq))) {
5042
5043                 /* Make sure we read CEQ entry after we have checked the
5044                  * ownership bit
5045                  */
5046                 dma_rmb();
5047
5048                 cqn = roce_get_field(ceqe->comp,
5049                                      HNS_ROCE_V2_CEQE_COMP_CQN_M,
5050                                      HNS_ROCE_V2_CEQE_COMP_CQN_S);
5051
5052                 hns_roce_cq_completion(hr_dev, cqn);
5053
5054                 ++eq->cons_index;
5055                 ceqe_found = 1;
5056
5057                 if (eq->cons_index > (2 * eq->entries - 1)) {
5058                         dev_warn(dev, "cons_index overflow, set back to 0.\n");
5059                         eq->cons_index = 0;
5060                 }
5061         }
5062
5063         set_eq_cons_index_v2(eq);
5064
5065         return ceqe_found;
5066 }
5067
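/* MSI-X handler for event queues: polls either the completion or the
 * asynchronous EQ depending on eq->type_flag and reports IRQ_HANDLED
 * only when an entry was actually consumed.
 */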
5068 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5069 {
5070         struct hns_roce_eq *eq = eq_ptr;
5071         struct hns_roce_dev *hr_dev = eq->hr_dev;
5072         int int_work = 0;
5073
5074         if (eq->type_flag == HNS_ROCE_CEQ)
5075                 /* Completion event interrupt */
5076                 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5077         else
5078                 /* Asynchronous event interrupt */
5079                 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5080
5081         return IRQ_RETVAL(int_work);
5082 }
5083
5084 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5085 {
5086         struct hns_roce_dev *hr_dev = dev_id;
5087         struct device *dev = hr_dev->dev;
5088         int int_work = 0;
5089         u32 int_st;
5090         u32 int_en;
5091
5092         /* Abnormal interrupt */
5093         int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5094         int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5095
5096         if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5097                 struct pci_dev *pdev = hr_dev->pci_dev;
5098                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5099                 const struct hnae3_ae_ops *ops = ae_dev->ops;
5100
5101                 dev_err(dev, "AEQ overflow!\n");
5102
5103                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
5104                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5105
5106                 /* Set reset level for reset_event() */
5107                 if (ops->set_default_reset_request)
5108                         ops->set_default_reset_request(ae_dev,
5109                                                        HNAE3_FUNC_RESET);
5110                 if (ops->reset_event)
5111                         ops->reset_event(pdev, NULL);
5112
5113                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5114                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5115
5116                 int_work = 1;
5117         } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5118                 dev_err(dev, "BUS ERR!\n");
5119
5120                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
5121                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5122
5123                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5124                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5125
5126                 int_work = 1;
5127         } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5128                 dev_err(dev, "OTHER ERR!\n");
5129
5130                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
5131                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5132
5133                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5134                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5135
5136                 int_work = 1;
5137         } else {
5138                 dev_err(dev, "no abnormal irq found.\n");
5139         }
5139
5140         return IRQ_RETVAL(int_work);
5141 }
5142
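/* Mask or unmask all event interrupts: the per-EQ event interrupt enable
 * registers plus the shared abnormal interrupt enable and config
 * registers.
 */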
5143 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5144                                         int eq_num, int enable_flag)
5145 {
5146         int i;
5147
5148         if (enable_flag == EQ_ENABLE) {
5149                 for (i = 0; i < eq_num; i++)
5150                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5151                                    i * EQ_REG_OFFSET,
5152                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5153
5154                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5155                            HNS_ROCE_V2_VF_ABN_INT_EN_M);
5156                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5157                            HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5158         } else {
5159                 for (i = 0; i < eq_num; i++)
5160                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5161                                    i * EQ_REG_OFFSET,
5162                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5163
5164                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5165                            HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5166                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5167                            HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5168         }
5169 }
5170
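/* Destroy an EQ context via mailbox command; EQNs below
 * num_comp_vectors are completion EQs, the remainder asynchronous EQs.
 */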
5171 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5172 {
5173         struct device *dev = hr_dev->dev;
5174         int ret;
5175
5176         if (eqn < hr_dev->caps.num_comp_vectors)
5177                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5178                                         0, HNS_ROCE_CMD_DESTROY_CEQC,
5179                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5180         else
5181                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5182                                         0, HNS_ROCE_CMD_DESTROY_AEQC,
5183                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5184         if (ret)
5185                 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5186 }
5187
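/* Free multi-hop EQE memory in the reverse order of
 * hns_roce_mhop_alloc_eq(). The last buffer chunk of each level may be
 * partial, so its size is recomputed from the entries already accounted
 * for instead of assuming a full buf_chk_sz.
 */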
5188 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5189                                   struct hns_roce_eq *eq)
5190 {
5191         struct device *dev = hr_dev->dev;
5192         u64 idx;
5193         u64 size;
5194         u32 buf_chk_sz;
5195         u32 bt_chk_sz;
5196         u32 mhop_num;
5197         int eqe_alloc;
5198         int i = 0;
5199         int j = 0;
5200
5201         mhop_num = hr_dev->caps.eqe_hop_num;
5202         buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5203         bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5204
5205         /* hop_num = 0 */
5206         if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5207                 dma_free_coherent(dev, (unsigned int)(eq->entries *
5208                                   eq->eqe_size), eq->bt_l0, eq->l0_dma);
5209                 return;
5210         }
5211
5212         /* hop_num = 1 or hop_num = 2 */
5213         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5214         if (mhop_num == 1) {
5215                 for (i = 0; i < eq->l0_last_num; i++) {
5216                         if (i == eq->l0_last_num - 1) {
5217                                 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5218                                 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5219                                 dma_free_coherent(dev, size, eq->buf[i],
5220                                                   eq->buf_dma[i]);
5221                                 break;
5222                         }
5223                         dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5224                                           eq->buf_dma[i]);
5225                 }
5226         } else if (mhop_num == 2) {
5227                 for (i = 0; i < eq->l0_last_num; i++) {
5228                         dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5229                                           eq->l1_dma[i]);
5230
5231                         for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5232                                 idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
5233                                 if (i == eq->l0_last_num - 1 &&
5234                                     j == eq->l1_last_num - 1) {
5235                                         eqe_alloc = (buf_chk_sz / eq->eqe_size)
5236                                                     * idx;
5237                                         size = (eq->entries - eqe_alloc)
5238                                                 * eq->eqe_size;
5239                                         dma_free_coherent(dev, size,
5240                                                           eq->buf[idx],
5241                                                           eq->buf_dma[idx]);
5242                                         break;
5243                                 }
5244                                 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5245                                                   eq->buf_dma[idx]);
5246                         }
5247                 }
5248         }
5249         kfree(eq->buf_dma);
5250         kfree(eq->buf);
5251         kfree(eq->l1_dma);
5252         kfree(eq->bt_l1);
5253         eq->buf_dma = NULL;
5254         eq->buf = NULL;
5255         eq->l1_dma = NULL;
5256         eq->bt_l1 = NULL;
5257 }
5258
5259 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5260                                 struct hns_roce_eq *eq)
5261 {
5262         u32 buf_chk_sz;
5263
5264         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5265
5266         if (hr_dev->caps.eqe_hop_num) {
5267                 hns_roce_mhop_free_eq(hr_dev, eq);
5268                 return;
5269         }
5270
5271         dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
5272                           eq->buf_list->map);
5273         kfree(eq->buf_list);
5274 }
5275
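/* Fill the EQ context in the mailbox buffer. Note that the EQE base
 * address is split across two fields (bits [34:3] and [63:35]), and the
 * current/next EQE block addresses across two or three, to match the
 * context layout.
 */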
5276 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5277                                 struct hns_roce_eq *eq,
5278                                 void *mb_buf)
5279 {
5280         struct hns_roce_eq_context *eqc;
5281
5282         eqc = mb_buf;
5283         memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5284
5285         /* init eqc */
5286         eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5287         eq->hop_num = hr_dev->caps.eqe_hop_num;
5288         eq->cons_index = 0;
5289         eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5290         eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5291         eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5292         eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5293         eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5294         eq->shift = ilog2((unsigned int)eq->entries);
5295
5296         if (!eq->hop_num)
5297                 eq->eqe_ba = eq->buf_list->map;
5298         else
5299                 eq->eqe_ba = eq->l0_dma;
5300
5301         /* set eqc state */
5302         roce_set_field(eqc->byte_4,
5303                        HNS_ROCE_EQC_EQ_ST_M,
5304                        HNS_ROCE_EQC_EQ_ST_S,
5305                        HNS_ROCE_V2_EQ_STATE_VALID);
5306
5307         /* set eqe hop num */
5308         roce_set_field(eqc->byte_4,
5309                        HNS_ROCE_EQC_HOP_NUM_M,
5310                        HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5311
5312         /* set eqc over_ignore */
5313         roce_set_field(eqc->byte_4,
5314                        HNS_ROCE_EQC_OVER_IGNORE_M,
5315                        HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5316
5317         /* set eqc coalesce */
5318         roce_set_field(eqc->byte_4,
5319                        HNS_ROCE_EQC_COALESCE_M,
5320                        HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5321
5322         /* set eqc arm_state */
5323         roce_set_field(eqc->byte_4,
5324                        HNS_ROCE_EQC_ARM_ST_M,
5325                        HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5326
5327         /* set eqn */
5328         roce_set_field(eqc->byte_4,
5329                        HNS_ROCE_EQC_EQN_M,
5330                        HNS_ROCE_EQC_EQN_S, eq->eqn);
5331
5332         /* set eqe_cnt */
5333         roce_set_field(eqc->byte_4,
5334                        HNS_ROCE_EQC_EQE_CNT_M,
5335                        HNS_ROCE_EQC_EQE_CNT_S,
5336                        HNS_ROCE_EQ_INIT_EQE_CNT);
5337
5338         /* set eqe_ba_pg_sz */
5339         roce_set_field(eqc->byte_8,
5340                        HNS_ROCE_EQC_BA_PG_SZ_M,
5341                        HNS_ROCE_EQC_BA_PG_SZ_S,
5342                        eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5343
5344         /* set eqe_buf_pg_sz */
5345         roce_set_field(eqc->byte_8,
5346                        HNS_ROCE_EQC_BUF_PG_SZ_M,
5347                        HNS_ROCE_EQC_BUF_PG_SZ_S,
5348                        eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5349
5350         /* set eq_producer_idx */
5351         roce_set_field(eqc->byte_8,
5352                        HNS_ROCE_EQC_PROD_INDX_M,
5353                        HNS_ROCE_EQC_PROD_INDX_S,
5354                        HNS_ROCE_EQ_INIT_PROD_IDX);
5355
5356         /* set eq_max_cnt */
5357         roce_set_field(eqc->byte_12,
5358                        HNS_ROCE_EQC_MAX_CNT_M,
5359                        HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5360
5361         /* set eq_period */
5362         roce_set_field(eqc->byte_12,
5363                        HNS_ROCE_EQC_PERIOD_M,
5364                        HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5365
5366         /* set eqe_report_timer */
5367         roce_set_field(eqc->eqe_report_timer,
5368                        HNS_ROCE_EQC_REPORT_TIMER_M,
5369                        HNS_ROCE_EQC_REPORT_TIMER_S,
5370                        HNS_ROCE_EQ_INIT_REPORT_TIMER);
5371
5372         /* set eqe_ba [34:3] */
5373         roce_set_field(eqc->eqe_ba0,
5374                        HNS_ROCE_EQC_EQE_BA_L_M,
5375                        HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5376
5377         /* set eqe_ba [63:35] */
5378         roce_set_field(eqc->eqe_ba1,
5379                        HNS_ROCE_EQC_EQE_BA_H_M,
5380                        HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5381
5382         /* set eq shift */
5383         roce_set_field(eqc->byte_28,
5384                        HNS_ROCE_EQC_SHIFT_M,
5385                        HNS_ROCE_EQC_SHIFT_S, eq->shift);
5386
5387         /* set eq MSI_IDX */
5388         roce_set_field(eqc->byte_28,
5389                        HNS_ROCE_EQC_MSI_INDX_M,
5390                        HNS_ROCE_EQC_MSI_INDX_S,
5391                        HNS_ROCE_EQ_INIT_MSI_IDX);
5392
5393         /* set cur_eqe_ba [27:12] */
5394         roce_set_field(eqc->byte_28,
5395                        HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5396                        HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5397
5398         /* set cur_eqe_ba [59:28] */
5399         roce_set_field(eqc->byte_32,
5400                        HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5401                        HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5402
5403         /* set cur_eqe_ba [63:60] */
5404         roce_set_field(eqc->byte_36,
5405                        HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5406                        HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5407
5408         /* set eq consumer idx */
5409         roce_set_field(eqc->byte_36,
5410                        HNS_ROCE_EQC_CONS_INDX_M,
5411                        HNS_ROCE_EQC_CONS_INDX_S,
5412                        HNS_ROCE_EQ_INIT_CONS_IDX);
5413
5414         /* set nxt_eqe_ba [43:12] */
5415         roce_set_field(eqc->nxt_eqe_ba0,
5416                        HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5417                        HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5418
5419         /* set nxt_eqe_ba [63:44] */
5420         roce_set_field(eqc->nxt_eqe_ba1,
5421                        HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5422                        HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
5423 }
5424
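/* Allocate EQE memory for multi-hop addressing:
 *
 *   hop_num = 0: one contiguous buffer, bt_l0 is the buffer itself
 *   hop_num = 1: bt_l0[i] -> buf[i]
 *   hop_num = 2: bt_l0[i] -> bt_l1[i], bt_l1[i][j] -> buf[idx]
 *
 * Only the trailing chunk is sized to the remaining entries; all other
 * chunks are a full buf_chk_sz.
 */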
5425 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5426                                   struct hns_roce_eq *eq)
5427 {
5428         struct device *dev = hr_dev->dev;
5429         int eq_alloc_done = 0;
5430         int eq_buf_cnt = 0;
5431         int eqe_alloc;
5432         u32 buf_chk_sz;
5433         u32 bt_chk_sz;
5434         u32 mhop_num;
5435         u64 size;
5436         u64 idx;
5437         int ba_num;
5438         int bt_num;
5439         int record_i;
5440         int record_j;
5441         int i = 0;
5442         int j = 0;
5443
5444         mhop_num = hr_dev->caps.eqe_hop_num;
5445         buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5446         bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5447
5448         ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5449                               buf_chk_sz);
5450         bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
5451
5452         /* hop_num = 0 */
5453         if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5454                 if (eq->entries > buf_chk_sz / eq->eqe_size) {
5455                         dev_err(dev, "eq entries %d is larger than buf_pg_sz!\n",
5456                                 eq->entries);
5457                         return -EINVAL;
5458                 }
5459                 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5460                                                &(eq->l0_dma), GFP_KERNEL);
5461                 if (!eq->bt_l0)
5462                         return -ENOMEM;
5463
5464                 eq->cur_eqe_ba = eq->l0_dma;
5465                 eq->nxt_eqe_ba = 0;
5466
5467                 return 0;
5468         }
5469
5470         eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5471         if (!eq->buf_dma)
5472                 return -ENOMEM;
5473         eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5474         if (!eq->buf)
5475                 goto err_kcalloc_buf;
5476
5477         if (mhop_num == 2) {
5478                 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5479                 if (!eq->l1_dma)
5480                         goto err_kcalloc_l1_dma;
5481
5482                 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5483                 if (!eq->bt_l1)
5484                         goto err_kcalloc_bt_l1;
5485         }
5486
5487         /* alloc L0 BT */
5488         eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5489         if (!eq->bt_l0)
5490                 goto err_dma_alloc_l0;
5491
5492         if (mhop_num == 1) {
5493                 if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
5494                         dev_err(dev, "ba_num %d is too large for 1 hop\n",
5495                                 ba_num);
5496
5497                 /* alloc buf */
5498                 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5499                         if (eq_buf_cnt + 1 < ba_num) {
5500                                 size = buf_chk_sz;
5501                         } else {
5502                                 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5503                                 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5504                         }
5505                         eq->buf[i] = dma_alloc_coherent(dev, size,
5506                                                         &(eq->buf_dma[i]),
5507                                                         GFP_KERNEL);
5508                         if (!eq->buf[i])
5509                                 goto err_dma_alloc_buf;
5510
5511                         *(eq->bt_l0 + i) = eq->buf_dma[i];
5512
5513                         eq_buf_cnt++;
5514                         if (eq_buf_cnt >= ba_num)
5515                                 break;
5516                 }
5517                 eq->cur_eqe_ba = eq->buf_dma[0];
5518                 eq->nxt_eqe_ba = eq->buf_dma[1];
5519
5520         } else if (mhop_num == 2) {
5521                 /* alloc L1 BT and buf */
5522                 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5523                         eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5524                                                           &(eq->l1_dma[i]),
5525                                                           GFP_KERNEL);
5526                         if (!eq->bt_l1[i])
5527                                 goto err_dma_alloc_l1;
5528                         *(eq->bt_l0 + i) = eq->l1_dma[i];
5529
5530                         for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5531                                 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5532                                 if (eq_buf_cnt + 1 < ba_num) {
5533                                         size = buf_chk_sz;
5534                                 } else {
5535                                         eqe_alloc = (buf_chk_sz / eq->eqe_size)
5536                                                     * idx;
5537                                         size = (eq->entries - eqe_alloc)
5538                                                 * eq->eqe_size;
5539                                 }
5540                                 eq->buf[idx] = dma_alloc_coherent(dev, size,
5541                                                                   &(eq->buf_dma[idx]),
5542                                                                   GFP_KERNEL);
5543                                 if (!eq->buf[idx])
5544                                         goto err_dma_alloc_buf;
5545
5546                                 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5547
5548                                 eq_buf_cnt++;
5549                                 if (eq_buf_cnt >= ba_num) {
5550                                         eq_alloc_done = 1;
5551                                         break;
5552                                 }
5553                         }
5554
5555                         if (eq_alloc_done)
5556                                 break;
5557                 }
5558                 eq->cur_eqe_ba = eq->buf_dma[0];
5559                 eq->nxt_eqe_ba = eq->buf_dma[1];
5560         }
5561
5562         eq->l0_last_num = i + 1;
5563         if (mhop_num == 2)
5564                 eq->l1_last_num = j + 1;
5565
5566         return 0;
5567
5568 err_dma_alloc_l1:
5569         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5570         eq->bt_l0 = NULL;
5571         eq->l0_dma = 0;
5572         for (i -= 1; i >= 0; i--) {
5573                 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5574                                   eq->l1_dma[i]);
5575
5576                 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5577                         idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5578                         dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5579                                           eq->buf_dma[idx]);
5580                 }
5581         }
5582         goto err_dma_alloc_l0;
5583
5584 err_dma_alloc_buf:
5585         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5586         eq->bt_l0 = NULL;
5587         eq->l0_dma = 0;
5588
5589         if (mhop_num == 1) {
5590                 for (i -= 1; i >= 0; i--)
5591                         dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5592                                           eq->buf_dma[i]);
5593         } else if (mhop_num == 2) {
5594                 record_i = i;
5595                 record_j = j;
5596                 for (; i >= 0; i--) {
5597                         dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5598                                           eq->l1_dma[i]);
5599
5600                         for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5601                                 if (i == record_i && j >= record_j)
5602                                         break;
5603
5604                                 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5605                                 dma_free_coherent(dev, buf_chk_sz,
5606                                                   eq->buf[idx],
5607                                                   eq->buf_dma[idx]);
5608                         }
5609                 }
5610         }
5611
5612 err_dma_alloc_l0:
5613         kfree(eq->bt_l1);
5614         eq->bt_l1 = NULL;
5615
5616 err_kcalloc_bt_l1:
5617         kfree(eq->l1_dma);
5618         eq->l1_dma = NULL;
5619
5620 err_kcalloc_l1_dma:
5621         kfree(eq->buf);
5622         eq->buf = NULL;
5623
5624 err_kcalloc_buf:
5625         kfree(eq->buf_dma);
5626         eq->buf_dma = NULL;
5627
5628         return -ENOMEM;
5629 }
5630
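/* Allocate the EQE buffer (contiguous or multi-hop), build the EQ
 * context in a mailbox buffer and issue the CREATE_CEQC/CREATE_AEQC
 * command. On command failure the buffers are freed again.
 */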
5631 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5632                                  struct hns_roce_eq *eq,
5633                                  unsigned int eq_cmd)
5634 {
5635         struct device *dev = hr_dev->dev;
5636         struct hns_roce_cmd_mailbox *mailbox;
5637         u32 buf_chk_sz = 0;
5638         int ret;
5639
5640         /* Allocate mailbox memory */
5641         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5642         if (IS_ERR(mailbox))
5643                 return PTR_ERR(mailbox);
5644
5645         if (!hr_dev->caps.eqe_hop_num) {
5646                 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5647
5648                 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5649                                        GFP_KERNEL);
5650                 if (!eq->buf_list) {
5651                         ret = -ENOMEM;
5652                         goto free_cmd_mbox;
5653                 }
5654
5655                 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5656                                                        &(eq->buf_list->map),
5657                                                        GFP_KERNEL);
5658                 if (!eq->buf_list->buf) {
5659                         ret = -ENOMEM;
5660                         goto err_alloc_buf;
5661                 }
5662
5663         } else {
5664                 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5665                 if (ret)
5666                         goto free_cmd_mbox;
5669         }
5670
5671         hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5672
5673         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5674                                 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5675         if (ret) {
5676                 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5677                 goto err_cmd_mbox;
5678         }
5679
5680         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5681
5682         return 0;
5683
5684 err_cmd_mbox:
5685         if (!hr_dev->caps.eqe_hop_num) {
5686                 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5687                                   eq->buf_list->map);
5688         } else {
5689                 hns_roce_mhop_free_eq(hr_dev, eq);
5690                 goto free_cmd_mbox;
5691         }
5692
5693 err_alloc_buf:
5694         kfree(eq->buf_list);
5695
5696 free_cmd_mbox:
5697         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5698
5699         return ret;
5700 }
5701
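/* Create all EQs and request their interrupts. hr_dev->irq[] is laid
 * out as [abnormal][AEQ][CEQ] while eq_table->eq[] holds CEQs first,
 * hence the index arithmetic below when matching IRQs and names to
 * queues.
 */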
5702 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5703 {
5704         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5705         struct device *dev = hr_dev->dev;
5706         struct hns_roce_eq *eq;
5707         unsigned int eq_cmd;
5708         int irq_num;
5709         int eq_num;
5710         int other_num;
5711         int comp_num;
5712         int aeq_num;
5713         int i, j, k;
5714         int ret;
5715
5716         other_num = hr_dev->caps.num_other_vectors;
5717         comp_num = hr_dev->caps.num_comp_vectors;
5718         aeq_num = hr_dev->caps.num_aeq_vectors;
5719
5720         eq_num = comp_num + aeq_num;
5721         irq_num = eq_num + other_num;
5722
5723         eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5724         if (!eq_table->eq)
5725                 return -ENOMEM;
5726
5727         for (i = 0; i < irq_num; i++) {
5728                 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5729                                                GFP_KERNEL);
5730                 if (!hr_dev->irq_names[i]) {
5731                         ret = -ENOMEM;
5732                         goto err_failed_kzalloc;
5733                 }
5734         }
5735
5736         /* create eq */
5737         for (j = 0; j < eq_num; j++) {
5738                 eq = &eq_table->eq[j];
5739                 eq->hr_dev = hr_dev;
5740                 eq->eqn = j;
5741                 if (j < comp_num) {
5742                         /* CEQ */
5743                         eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5744                         eq->type_flag = HNS_ROCE_CEQ;
5745                         eq->entries = hr_dev->caps.ceqe_depth;
5746                         eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5747                         eq->irq = hr_dev->irq[j + other_num + aeq_num];
5748                         eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5749                         eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5750                 } else {
5751                         /* AEQ */
5752                         eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5753                         eq->type_flag = HNS_ROCE_AEQ;
5754                         eq->entries = hr_dev->caps.aeqe_depth;
5755                         eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5756                         eq->irq = hr_dev->irq[j - comp_num + other_num];
5757                         eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5758                         eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5759                 }
5760
5761                 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5762                 if (ret) {
5763                         dev_err(dev, "eq create failed.\n");
5764                         goto err_create_eq_fail;
5765                 }
5766         }
5767
5768         /* enable irq */
5769         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5770
5771         /* irq contains: abnormal + AEQ + CEQ */
5772         for (k = 0; k < irq_num; k++)
5773                 if (k < other_num)
5774                         snprintf((char *)hr_dev->irq_names[k],
5775                                  HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
5776                 else if (k < (other_num + aeq_num))
5777                         snprintf((char *)hr_dev->irq_names[k],
5778                                  HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5779                                  k - other_num);
5780                 else
5781                         snprintf((char *)hr_dev->irq_names[k],
5782                                  HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5783                                  k - other_num - aeq_num);
5784
5785         for (k = 0; k < irq_num; k++) {
5786                 if (k < other_num)
5787                         ret = request_irq(hr_dev->irq[k],
5788                                           hns_roce_v2_msix_interrupt_abn,
5789                                           0, hr_dev->irq_names[k], hr_dev);
5790
5791                 else if (k < (other_num + comp_num))
5792                         ret = request_irq(eq_table->eq[k - other_num].irq,
5793                                           hns_roce_v2_msix_interrupt_eq,
5794                                           0, hr_dev->irq_names[k + aeq_num],
5795                                           &eq_table->eq[k - other_num]);
5796                 else
5797                         ret = request_irq(eq_table->eq[k - other_num].irq,
5798                                           hns_roce_v2_msix_interrupt_eq,
5799                                           0, hr_dev->irq_names[k - comp_num],
5800                                           &eq_table->eq[k - other_num]);
5801                 if (ret) {
5802                         dev_err(dev, "Request irq error!\n");
5803                         goto err_request_irq_fail;
5804                 }
5805         }
5806
5807         hr_dev->irq_workq =
5808                 create_singlethread_workqueue("hns_roce_irq_workqueue");
5809         if (!hr_dev->irq_workq) {
5810                 dev_err(dev, "Create irq workqueue failed!\n");
5811                 ret = -ENOMEM;
5812                 goto err_request_irq_fail;
5813         }
5814
5815         return 0;
5816
5817 err_request_irq_fail:
5818         for (k -= 1; k >= 0; k--)
5819                 if (k < other_num)
5820                         free_irq(hr_dev->irq[k], hr_dev);
5821                 else
5822                         free_irq(eq_table->eq[k - other_num].irq,
5823                                  &eq_table->eq[k - other_num]);
5824
5825 err_create_eq_fail:
5826         for (j -= 1; j >= 0; j--)
5827                 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
5828
5829 err_failed_kzalloc:
5830         for (i -= 1; i >= 0; i--)
5831                 kfree(hr_dev->irq_names[i]);
5832         kfree(eq_table->eq);
5833
5834         return ret;
5835 }
5836
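/* Tear down the EQ table in reverse order of creation: mask all
 * interrupts, free the abnormal IRQs, destroy each EQ context and free
 * its buffers and IRQ, then flush and destroy the irq workqueue.
 */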
5837 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5838 {
5839         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5840         int irq_num;
5841         int eq_num;
5842         int i;
5843
5844         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5845         irq_num = eq_num + hr_dev->caps.num_other_vectors;
5846
5847         /* Disable irq */
5848         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5849
5850         for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5851                 free_irq(hr_dev->irq[i], hr_dev);
5852
5853         for (i = 0; i < eq_num; i++) {
5854                 hns_roce_v2_destroy_eqc(hr_dev, i);
5855
5856                 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5857
5858                 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5859         }
5860
5861         for (i = 0; i < irq_num; i++)
5862                 kfree(hr_dev->irq_names[i]);
5863
5864         kfree(eq_table->eq);
5865
5866         flush_workqueue(hr_dev->irq_workq);
5867         destroy_workqueue(hr_dev->irq_workq);
5868 }
5869
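/* Fill the SRQ context in the mailbox buffer: state, WQE and index-queue
 * base-address-table addresses (each split into low and high fields),
 * hop numbers, page sizes, PD, XRCD and the XRC CQN.
 */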
5870 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5871                                    struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5872                                    u32 cqn, void *mb_buf, u64 *mtts_wqe,
5873                                    u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5874                                    dma_addr_t dma_handle_idx)
5875 {
5876         struct hns_roce_srq_context *srq_context;
5877
5878         srq_context = mb_buf;
5879         memset(srq_context, 0, sizeof(*srq_context));
5880
5881         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5882                        SRQC_BYTE_4_SRQ_ST_S, 1);
5883
5884         roce_set_field(srq_context->byte_4_srqn_srqst,
5885                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5886                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5887                        (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5888                        hr_dev->caps.srqwqe_hop_num));
5889         roce_set_field(srq_context->byte_4_srqn_srqst,
5890                        SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5891                        ilog2(srq->max));
5892
5893         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5894                        SRQC_BYTE_4_SRQN_S, srq->srqn);
5895
5896         roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5897                        SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5898
5899         roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5900                        SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5901
5902         srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5903
5904         roce_set_field(srq_context->byte_24_wqe_bt_ba,
5905                        SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5906                        SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5907                        dma_handle_wqe >> 35);
5908
5909         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5910                        SRQC_BYTE_28_PD_S, pdn);
5911         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5912                        SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5913                        fls(srq->max_gs - 1));
5914
5915         srq_context->idx_bt_ba = cpu_to_le32((u32)(dma_handle_idx >> 3));
5917         roce_set_field(srq_context->rsv_idx_bt_ba,
5918                        SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5919                        SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5920                        dma_handle_idx >> 35);
5921
5922         srq_context->idx_cur_blk_addr =
5923                         cpu_to_le32((u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT));
5925         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5926                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5927                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5928                        mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
5929         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5930                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5931                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5932                        hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5933                        hr_dev->caps.idx_hop_num);
5934
5935         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5936                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5937                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5938                        hr_dev->caps.idx_ba_pg_sz);
5939         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5940                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5941                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5942                        hr_dev->caps.idx_buf_pg_sz);
5943
5944         srq_context->idx_nxt_blk_addr =
5945                         cpu_to_le32((u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT));
5947         roce_set_field(srq_context->rsv_idxnxtblkaddr,
5948                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5949                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5950                        mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
5951         roce_set_field(srq_context->byte_56_xrc_cqn,
5952                        SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5953                        cqn);
5954         roce_set_field(srq_context->byte_56_xrc_cqn,
5955                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5956                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5957                        hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
5958         roce_set_field(srq_context->byte_56_xrc_cqn,
5959                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5960                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5961                        hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
5962
5963         roce_set_bit(srq_context->db_record_addr_record_en,
5964                      SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5965 }
5966
5967 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5968                                   struct ib_srq_attr *srq_attr,
5969                                   enum ib_srq_attr_mask srq_attr_mask,
5970                                   struct ib_udata *udata)
5971 {
5972         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5973         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5974         struct hns_roce_srq_context *srq_context;
5975         struct hns_roce_srq_context *srqc_mask;
5976         struct hns_roce_cmd_mailbox *mailbox;
5977         int ret;
5978
5979         if (srq_attr_mask & IB_SRQ_LIMIT) {
5980                 if (srq_attr->srq_limit >= srq->max)
5981                         return -EINVAL;
5982
5983                 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5984                 if (IS_ERR(mailbox))
5985                         return PTR_ERR(mailbox);
5986
5987                 srq_context = mailbox->buf;
5988                 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5989
5990                 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5991
5992                 roce_set_field(srq_context->byte_8_limit_wl,
5993                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5994                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5995                 roce_set_field(srqc_mask->byte_8_limit_wl,
5996                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5997                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5998
5999                 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6000                                         HNS_ROCE_CMD_MODIFY_SRQC,
6001                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
6002                 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6003                 if (ret) {
6004                         dev_err(hr_dev->dev,
6005                                 "failed to process cmd when modifying SRQ.\n");
6006                         return ret;
6007                 }
6008         }
6009
6010         return 0;
6011 }
6012
6013 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
6014 {
6015         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6016         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6017         struct hns_roce_srq_context *srq_context;
6018         struct hns_roce_cmd_mailbox *mailbox;
6019         int limit_wl;
6020         int ret;
6021
6022         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6023         if (IS_ERR(mailbox))
6024                 return PTR_ERR(mailbox);
6025
6026         srq_context = mailbox->buf;
6027         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6028                                 HNS_ROCE_CMD_QUERY_SRQC,
6029                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
6030         if (ret) {
6031                 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6032                 goto out;
6033         }
6034
6035         limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6036                                   SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6037                                   SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6038
6039         attr->srq_limit = limit_wl;
6040         attr->max_wr    = srq->max - 1;
6041         attr->max_sge   = srq->max_gs;
6042
6045 out:
6046         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6047         return ret;
6048 }
6049
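/* Reserve a free slot in the SRQ index-queue bitmap and return its
 * index, or -ENOSPC when every entry is in use.
 */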
6050 static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6051                             unsigned long size)
6052 {
6053         int wqe_idx;
6054
6055         if (unlikely(bitmap_full(idx_que->bitmap, size)))
6056                 return -ENOSPC;
6057
6058         wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6059
6060         bitmap_set(idx_que->bitmap, wqe_idx, 1);
6061
6062         return wqe_idx;
6063 }
6064
6065 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6066                            int cur_idx, int wqe_idx)
6067 {
6068         unsigned int *addr;
6069
6070         addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
6071                                                    cur_idx * idx_que->entry_sz);
6072         *addr = wqe_idx;
6073 }
6074
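/* Post a chain of receive WRs to the SRQ. Each WR claims a free WQE via
 * the index-queue bitmap and writes its scatter list, terminated by a
 * zero-length sentinel (lkey 0x100) when shorter than max_gs; once all
 * WRs are queued, a doorbell with the new head is rung.
 */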
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        int ret = 0;
        int wqe_idx;
        void *wqe;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        ind = srq->head & (srq->max - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->max_gs)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
                if (wqe_idx < 0) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                fill_idx_queue(&srq->idx_que, ind, wqe_idx);
                wqe = get_srq_wqe(srq, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

                for (i = 0; i < wr->num_sge; ++i) {
                        dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
                        dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
                        dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
                }

                if (i < srq->max_gs) {
                        /*
                         * Terminate a short sge list with a sentinel entry
                         * carrying the invalid lkey 0x100.
                         */
                        dseg[i].len = 0;
                        dseg[i].lkey = cpu_to_le32(0x100);
                        dseg[i].addr = 0;
                }

                srq->wrid[wqe_idx] = wr->wr_id;
                ind = (ind + 1) & (srq->max - 1);
        }

        if (likely(nreq)) {
                srq->head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                srq_db.byte_4 =
                        cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
                                    (srq->srqn & V2_DB_BYTE_4_TAG_M));
                srq_db.parameter = cpu_to_le32(srq->head);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

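/* DFX (diagnostics) hooks, used by the rdma resource tracking interface */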
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
        .query_cqc_info = hns_roce_v2_query_cqc_info,
};

static const struct ib_device_ops hns_roce_v2_dev_ops = {
        .destroy_qp = hns_roce_v2_destroy_qp,
        .modify_cq = hns_roce_v2_modify_cq,
        .poll_cq = hns_roce_v2_poll_cq,
        .post_recv = hns_roce_v2_post_recv,
        .post_send = hns_roce_v2_post_send,
        .query_qp = hns_roce_v2_query_qp,
        .req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
        .modify_srq = hns_roce_v2_modify_srq,
        .post_srq_recv = hns_roce_v2_post_srq_recv,
        .query_srq = hns_roce_v2_query_srq,
};

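/* Hardware operation table handed to the hns_roce core for HIP08 devices */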
static const struct hns_roce_hw hns_roce_hw_v2 = {
        .cmq_init = hns_roce_v2_cmq_init,
        .cmq_exit = hns_roce_v2_cmq_exit,
        .hw_profile = hns_roce_v2_profile,
        .hw_init = hns_roce_v2_init,
        .hw_exit = hns_roce_v2_exit,
        .post_mbox = hns_roce_v2_post_mbox,
        .chk_mbox = hns_roce_v2_chk_mbox,
        .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
        .set_gid = hns_roce_v2_set_gid,
        .set_mac = hns_roce_v2_set_mac,
        .write_mtpt = hns_roce_v2_write_mtpt,
        .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
        .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
        .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
        .write_cqc = hns_roce_v2_write_cqc,
        .set_hem = hns_roce_v2_set_hem,
        .clear_hem = hns_roce_v2_clear_hem,
        .modify_qp = hns_roce_v2_modify_qp,
        .query_qp = hns_roce_v2_query_qp,
        .destroy_qp = hns_roce_v2_destroy_qp,
        .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
        .modify_cq = hns_roce_v2_modify_cq,
        .post_send = hns_roce_v2_post_send,
        .post_recv = hns_roce_v2_post_recv,
        .req_notify_cq = hns_roce_v2_req_notify_cq,
        .poll_cq = hns_roce_v2_poll_cq,
        .init_eq = hns_roce_v2_init_eq_table,
        .cleanup_eq = hns_roce_v2_cleanup_eq_table,
        .write_srqc = hns_roce_v2_write_srqc,
        .modify_srq = hns_roce_v2_modify_srq,
        .query_srq = hns_roce_v2_query_srq,
        .post_srq_recv = hns_roce_v2_post_srq_recv,
        .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
        .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

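/*
 * Pull the per-device configuration shared by the hnae3 NIC driver: register
 * base, netdev, interrupt vectors and reset counter, then bind the HIP08
 * hardware ops to the RoCE device.
 */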
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                  struct hnae3_handle *handle)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int i;

        hr_dev->hw = &hns_roce_hw_v2;
        hr_dev->dfx = &hns_roce_dfx_hw_v2;
        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
        hr_dev->odb_offset = hr_dev->sdb_offset;

        /* Get info from NIC driver. */
        hr_dev->reg_base = handle->rinfo.roce_io_base;
        hr_dev->caps.num_ports = 1;
        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
        hr_dev->iboe.phy_port[0] = 0;

        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
                            hr_dev->iboe.netdevs[0]->dev_addr);

        for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
                                                i + handle->rinfo.base_vector);

        /* cmd issue mode: 0 is poll, 1 is event */
        hr_dev->cmd_mod = 1;
        hr_dev->loop_idc = 0;

        hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
        priv->handle = handle;

        return 0;
}

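/*
 * Allocate an ib_device, fetch the NIC-provided configuration and bring up
 * the RoCE engine for this hnae3 handle.
 */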
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;
        int ret;

        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
        if (!hr_dev)
                return -ENOMEM;

        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
        if (!hr_dev->priv) {
                ret = -ENOMEM;
                goto error_failed_kzalloc;
        }

        hr_dev->pci_dev = handle->pdev;
        hr_dev->dev = &handle->pdev->dev;

        ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
        if (ret) {
                dev_err(hr_dev->dev, "Get Configuration failed!\n");
                goto error_failed_get_cfg;
        }

        ret = hns_roce_init(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
                goto error_failed_get_cfg;
        }

        handle->priv = hr_dev;

        return 0;

error_failed_get_cfg:
        kfree(hr_dev->priv);

error_failed_kzalloc:
        ib_dealloc_device(&hr_dev->ib_dev);

        return ret;
}

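/* Tear down the RoCE engine and release everything allocated at init. */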
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                             bool reset)
{
        struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

        if (!hr_dev)
                return;

        handle->priv = NULL;
        hns_roce_exit(hr_dev);
        kfree(hr_dev->priv);
        ib_dealloc_device(&hr_dev->ib_dev);
}

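/*
 * hnae3 client entry point: refuse to bring the instance up while the NIC
 * is resetting, and track the instance state across the init path.
 */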
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        const struct pci_device_id *id;
        struct device *dev = &handle->pdev->dev;
        int ret;

        handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

        if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                goto reset_chk_err;
        }

        id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
        if (!id)
                return 0;

        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
                if (ops->ae_dev_resetting(handle) ||
                    ops->get_hw_reset_stat(handle))
                        goto reset_chk_err;
                else
                        return ret;
        }

        handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

        return 0;

reset_chk_err:
        dev_err(dev, "Device is busy in resetting state.\n"
                     "Please retry later.\n");

        return -EBUSY;
}

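/* hnae3 client entry point: take the instance down if it was fully inited. */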
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                           bool reset)
{
        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
                return;

        handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

        __hns_roce_hw_v2_uninit_instance(handle, reset);

        handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

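/*
 * A reset is starting: mark the device inactive, disable doorbells and warn
 * consumers with an IB_EVENT_DEVICE_FATAL event so they stop issuing work.
 */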
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;
        struct ib_event event;

        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
                set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
        clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

        hr_dev = (struct hns_roce_dev *)handle->priv;
        if (!hr_dev)
                return 0;

        hr_dev->is_reset = true;
        hr_dev->active = false;
        hr_dev->dis_db = true;

        event.event = IB_EVENT_DEVICE_FATAL;
        event.device = &hr_dev->ib_dev;
        event.element.port_num = 1;
        ib_dispatch_event(&event);

        return 0;
}

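/* Reinitialize the RoCE instance once the NIC reset has completed. */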
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
        struct device *dev = &handle->pdev->dev;
        int ret;

        if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
                               &handle->rinfo.state)) {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

        dev_info(dev, "In reset process RoCE client reinit.\n");
        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
                 * engine is reinitialized here. If the reinit fails, clear
                 * handle->priv so the NIC driver knows the client is gone.
                 */
                handle->priv = NULL;
                dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
        } else {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                dev_info(dev, "Reset done, RoCE client reinit finished.\n");
        }

        return ret;
}

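/* Uninitialize the RoCE instance while the NIC is going through reset. */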
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
        if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
                return 0;

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
        dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");

        /* allow outstanding hardware activity to settle before uninit */
        msleep(100);
        __hns_roce_hw_v2_uninit_instance(handle, false);

        return 0;
}

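/* Dispatch hnae3 reset notifications to the down/init/uninit handlers. */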
static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
                                       enum hnae3_reset_notify_type type)
{
        int ret = 0;

        switch (type) {
        case HNAE3_DOWN_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_down(handle);
                break;
        case HNAE3_INIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_init(handle);
                break;
        case HNAE3_UNINIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_uninit(handle);
                break;
        default:
                break;
        }

        return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
        .init_instance = hns_roce_hw_v2_init_instance,
        .uninit_instance = hns_roce_hw_v2_uninit_instance,
        .reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
        .name = "hns_roce_hw_v2",
        .type = HNAE3_CLIENT_ROCE,
        .ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
        return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
        hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");