/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
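/*
 * All of the above are ordinary module parameters, so they can be set at
 * load time or, since they are created with mode 0644, changed later via
 * sysfs.  Illustrative example only, assuming the usual iw_cxgb4 module
 * name:
 *
 *   modprobe iw_cxgb4 db_fc_threshold=2000 ocqp_support=0
 *   echo 500 > /sys/module/iw_cxgb4/parameters/db_coalescing_threshold
 */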
static int alloc_ird(struct c4iw_dev *dev, u32 ird)

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	spin_unlock_irq(&dev->lock);

	dev_warn(&dev->rdev.lldi.pdev->dev,
		 "device IRD resources exhausted\n");

static void free_ird(struct c4iw_dev *dev, int ird)

	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
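/*
 * IRD (inbound RDMA read) resources are a device-wide pool: alloc_ird()
 * reserves "ird" entries under dev->lock and warns when the pool is
 * exhausted, while free_ird() returns them when RDMA init fails or the QP
 * is destroyed.
 */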
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)

	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)

	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)

	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
		dealloc_host_sq(rdev, sq);

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)

	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)

	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)

	ret = alloc_oc_sq(rdev, sq);
	ret = alloc_host_sq(rdev, sq);
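/*
 * The SQ can live either in on-chip memory (OCQP) or in host memory:
 * alloc_sq() tries the on-chip pool first and falls back to a host
 * dma_alloc_coherent() buffer, and dealloc_sq() keys off t4_sq_onchip()
 * to undo whichever allocation was made.
 */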
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)

	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   &bar2_qoffset, pbar2_qid);

	*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))

	return rdev->bar2_kva + bar2_qoffset;
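/*
 * c4iw_bar2_addrs() serves both kernel and user queues: the returned
 * __iomem pointer is the kernel doorbell/GTS address for the qid, while
 * pbar2_pa (passed only for user QPs) receives the page-aligned physical
 * address that is later handed to userspace via mmap.
 */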
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)

	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct c4iw_wr_wait wr_wait;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	wq->rq.qid = c4iw_get_qpid(rdev, uctx);

	wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
	wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {

	ret = alloc_sq(rdev, &wq->sq, user);
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),

	pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		 __func__, wq->sq.queue,
		 (unsigned long long)virt_to_phys(wq->sq.queue),
		 (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
					 user ? &wq->sq.bar2_pa : NULL);
	wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
					 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(2) |
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);

	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		FW_RI_RES_WR_IQID_V(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(3) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);

	pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 __func__, wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
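/*
 * create_qp() programs both egress queues with a single FW_RI_RES_WR that
 * carries two fw_ri_res entries (NRES_V(2)): one for the SQ and one for the
 * RQ.  Each eqsize is counted in 64B slots and includes the status page
 * entries; purely as an illustration, a 128-entry SQ with (say) 4 slots per
 * WQE plus 2 status entries would give eqsize = 128 * 4 + 2 = 514.  The WR
 * completes asynchronously, hence the wr_wait cookie paired with
 * c4iw_wait_for_reply(), and the trailing error path unwinds queue memory
 * and qids in reverse order of allocation.
 */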
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = cpu_to_be32(plen);
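/*
 * Immediate data is padded so the WQE stays a multiple of 16 bytes: the
 * roundup(plen + sizeof(*immdp), 16) expression computes how many trailing
 * pad bytes follow the copied payload, those bytes are zeroed, and
 * immdlen still records the unpadded payload length.  The copy loop wraps
 * dstp back to the start of the SQ when it runs off the end, since the
 * inline data may straddle the circular queue boundary.
 */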
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)

	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
		if (++flitp == queue_end)
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->nsge = cpu_to_be16(num_sge);
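/*
 * Each ib_sge is encoded as two 64-bit flits in the ISGL: the first packs
 * the lkey in the upper 32 bits and the length in the lower 32 bits, the
 * second carries the 64-bit address.  The flit pointer is wrapped back to
 * queue_start whenever it reaches queue_end because the SGL, like inline
 * data, may straddle the end of the circular work queue.
 */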
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)

	if (wr->num_sge > T4_MAX_SEND_SGE)
	switch (wr->opcode) {
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);

	if (wr->send_flags & IB_SEND_INLINE) {
		ret = build_immd(sq, wqe->send.u.immd_src, wr,
				 T4_MAX_SEND_INLINE, &plen);
		size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
		ret = build_isgl((__be64 *)sq->queue,
				 (__be64 *)&sq->queue[sq->size],
				 wqe->send.u.isgl_src,
				 wr->sg_list, wr->num_sge, &plen);
		size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
		       wr->num_sge * sizeof(struct fw_ri_sge);
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)

	if (wr->num_sge > T4_MAX_SEND_SGE)
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->send_flags & IB_SEND_INLINE) {
		ret = build_immd(sq, wqe->write.u.immd_src, wr,
				 T4_MAX_WRITE_INLINE, &plen);
		size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
		ret = build_isgl((__be64 *)sq->queue,
				 (__be64 *)&sq->queue[sq->size],
				 wqe->write.u.isgl_src,
				 wr->sg_list, wr->num_sge, &plen);
		size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
		       wr->num_sge * sizeof(struct fw_ri_sge);
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)

	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
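/*
 * A read request with no data (num_sge == 0 or a zero-length first SGE) is
 * still posted to the hardware, but with stag 2 and zeroed addresses and
 * length, which amounts to a zero-length read.  In all of these helpers
 * *len16 is the WQE length expressed in 16-byte units, as expected by the
 * fw_ri_* descriptors.
 */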
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);

static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
			      struct ib_reg_wr *wr, struct c4iw_mr *mhp,

	__be64 *p = (__be64 *)fr->pbl;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
		FW_RI_TPTE_STAGSTATE_V(1) |
		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
	fr->tpte.locread_to_qpid = cpu_to_be32(
		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	fr->tpte.len_hi = cpu_to_be32(0);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
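/*
 * build_tpte_memreg() is the "write the TPT entry directly" fast path for
 * fastreg: instead of a generic FW_RI_FR_NSMR_WR it emits a
 * FW_RI_FR_NSMR_TPTE_WR whose payload is the TPT entry itself plus an
 * inline two-entry PBL, which is why c4iw_post_send() only selects it when
 * the adapter advertises fr_nsmr_tpte_wr_support and mpl_len <= 2.
 */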
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,

	struct fw_ri_immd *imdp;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);

	if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)

	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
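/*
 * For fastreg WRs the page list (PBL) is either copied inline into the WQE
 * as FW_RI_DATA_IMMD or referenced by a single-entry FW_RI_DATA_DSGL so
 * the hardware DMAs it from host memory.  The module parameter max_fr_immd
 * is the crossover point: PBLs larger than max_fr_immd bytes use the DSGL
 * form when the adapter supports ULPTX memwrite DSGL and use_dsgl is set.
 */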
static void free_qp_work(struct work_struct *work)

	struct c4iw_ucontext *ucontext;
	struct c4iw_dev *rhp;

	qhp = container_of(work, struct c4iw_qp, free_work);
	ucontext = qhp->ucontext;

	pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	c4iw_put_ucontext(ucontext);

static void queue_qp_free(struct kref *kref)

	qhp = container_of(kref, struct c4iw_qp, kref);
	pr_debug("%s qhp %p\n", __func__, qhp);
	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);

void c4iw_qp_add_ref(struct ib_qp *qp)

	pr_debug("%s ib_qp %p\n", __func__, qp);
	kref_get(&to_c4iw_qp(qp)->kref);

void c4iw_qp_rem_ref(struct ib_qp *qp)

	pr_debug("%s ib_qp %p\n", __func__, qp);
	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);

static void add_to_fc_list(struct list_head *head, struct list_head *entry)

	if (list_empty(entry))
		list_add_tail(entry, head);

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
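/*
 * Doorbell flow control: when the adapter's doorbell FIFO backs up, the
 * device leaves the NORMAL db_state.  In that mode the kernel does not ring
 * the SQ/RQ doorbells directly; instead the QP is queued on db_fc_list and
 * the pending pidx increments accumulate in wq_pidx_inc so they can be
 * replayed once the FIFO drains (see the db_fc_threshold and
 * db_delay_usecs module parameters above).
 */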
static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)

	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	spin_unlock_irqrestore(&schp->lock, flag);

	spin_lock_irqsave(&schp->comp_handler_lock, flag);
	(*schp->ibcq.comp_handler)(&schp->ibcq,
				   schp->ibcq.cq_context);
	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)

	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	spin_unlock_irqrestore(&rchp->lock, flag);

	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
				   rchp->ibcq.cq_context);
	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
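/*
 * Drain support: once a QP has been flushed, nothing further can be handed
 * to the hardware, so c4iw_post_send()/c4iw_post_receive() complete such
 * work requests in software by inserting a T4_ERR_SWFLUSH CQE carrying the
 * caller's wr_id directly into the CQ's software queue and then invoking
 * the completion handler, which lets queue-drain style waiters make
 * progress on a dead connection.
 */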
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)

	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	union t4_wr *wqe = NULL;
	struct t4_swsqe *swsqe;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_sq_drain_wr(qhp, wr);
	num_wrs = t4_sq_avail(&qhp->wq);
		spin_unlock_irqrestore(&qhp->lock, flag);

		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				c4iw_invalidate_mr(qhp->rhp,
						   wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			err = build_rdma_read(wqe, wr, &len16);
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
						   qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
			pr_debug("%s post of type=%d TBD!\n", __func__,
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
		swsqe->wr_id = wr->wr_id;
		swsqe->sge_ts = cxgb4_read_sge_timestamp(
				qhp->rhp->rdev.lldi.ports[0]);
		getnstimeofday(&swsqe->host_ts);

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
			 swsqe->opcode, swsqe->read_len);
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);

	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)

	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wr(qhp, wr);
	num_wrs = t4_rq_avail(&qhp->wq);
		spin_unlock_irqrestore(&qhp->lock, flag);

		if (wr->num_sge > T4_MAX_RECV_SGE) {

		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
		err = build_rdma_recv(qhp, wqe, wr, &len16);

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
			cxgb4_read_sge_timestamp(
					qhp->rhp->rdev.lldi.ports[0]);
			&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		pr_debug("%s cookie 0x%llx pidx %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);

	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,

		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
			*ecode = RDMAP_STAG_NOT_ASSOC;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
	case T4_ERR_DDP_VERSION:
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,

	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,

	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)

	int rq_flushed, sq_flushed;

	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (t4_clear_cq_armed(&rchp->cq) &&
	    (rq_flushed || sq_flushed)) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
					   rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
					   rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq,
					   schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);

static void flush_qp(struct c4iw_qp *qhp)

	struct c4iw_cq *rchp, *schp;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		t4_set_cq_in_error(&schp->cq);
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq,
					   schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	__flush_qp(qhp, rchp, schp);
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,

	struct fw_ri_wr *wqe;
	struct sk_buff *skb;

	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,

	skb = skb_dequeue(&ep->com.ep_skb_list);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (uintptr_t)&ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);

	pr_debug("%s ret %d\n", __func__, ret);

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)

	pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)

	struct fw_ri_wr *wqe;
	struct sk_buff *skb;

	pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);

	ret = alloc_ird(rhp, qhp->attr.max_ird);
		qhp->attr.max_ird = 0;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);

	free_ird(rhp, qhp->attr.max_ird);

	pr_debug("%s ret %d\n", __func__, ret);
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,

	struct c4iw_qp_attributes newattr = qhp->attr;
	struct c4iw_ep *ep = NULL;

	pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
			newattr.max_ord = attrs->max_ord;
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
			newattr.max_ird = attrs->max_ird;
		qhp->attr = newattr;

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
	if (qhp->attr.state == attrs->next_state)

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP. This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_fini(rhp, qhp, ep);
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			c4iw_get_ep(&qhp->ep->com);
			terminate = qhp->attr.send_term;
			ret = rdma_fini(rhp, qhp, ep);
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			c4iw_get_ep(&qhp->ep->com);
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
				  C4IW_QP_STATE_ERROR)) {
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			wake_up(&qhp->wait);
		case C4IW_QP_STATE_ERROR:
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
		set_state(qhp, C4IW_QP_STATE_IDLE);
	case C4IW_QP_STATE_TERMINATE:
		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);

	pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);

	wake_up(&qhp->wait);

	mutex_unlock(&qhp->mutex);

	post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP. This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		c4iw_put_ep(&ep->com);

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
		c4iw_put_ep(&ep->com);
	pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
int c4iw_destroy_qp(struct ib_qp *ib_qp)

	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;

	qhp = to_c4iw_qp(ib_qp);

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);

	spin_lock_irq(&rhp->lock);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	spin_unlock_irq(&rhp->lock);
	free_ird(rhp, qhp->attr.max_ird);

	c4iw_qp_rem_ref(ib_qp);

	pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)

	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.rq.queue);

	qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
	qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	kref_init(&qhp->kref);
	INIT_WORK(&qhp->free_work, free_qp_work);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);

		sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
		rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
		sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
		if (!sq_db_key_mm) {
		rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
		if (!rq_db_key_mm) {
		if (t4_sq_onchip(&qhp->wq.sq)) {
			ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
						 GFP_KERNEL);
			if (!ma_sync_key_mm) {
			uresp.flags = C4IW_QPF_ONCHIP;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (ma_sync_key_mm) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
			uresp.ma_sync_key = 0;
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		sq_key_mm->key = uresp.sq_key;
		sq_key_mm->addr = qhp->wq.sq.phys_addr;
		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, sq_key_mm);
		rq_key_mm->key = uresp.rq_key;
		rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
		rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, rq_key_mm);
		sq_db_key_mm->key = uresp.sq_db_gts_key;
		sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
		sq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, sq_db_key_mm);
		rq_db_key_mm->key = uresp.rq_db_gts_key;
		rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
		rq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, rq_db_key_mm);
		if (ma_sync_key_mm) {
			ma_sync_key_mm->key = uresp.ma_sync_key;
			ma_sync_key_mm->addr =
				(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
				 PCIE_MA_SYNC_A) & PAGE_MASK;
			ma_sync_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, ma_sync_key_mm);

		c4iw_get_ucontext(ucontext);
		qhp->ucontext = ucontext;

	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);

	kfree(ma_sync_key_mm);
	kfree(rq_db_key_mm);
	kfree(sq_db_key_mm);
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	return ERR_PTR(ret);
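/*
 * For userspace QPs the create response hands back a set of mmap keys
 * (sq_key, rq_key, sq_db_gts_key, rq_db_gts_key and, for on-chip SQs,
 * ma_sync_key).  Each key is a PAGE_SIZE-spaced token carved out of
 * ucontext->key, and the matching c4iw_mm_entry records the physical
 * address the user library will later mmap() through the driver to reach
 * the queue memory and its BAR2 doorbell/GTS page.
 */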
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)

	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
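/*
 * There is no doorbell attribute in the verbs API, so the driver overloads
 * the (otherwise unused for iWARP) SQ_PSN/RQ_PSN fields to carry doorbell
 * index increments for doorbell-drop recovery, mapping them onto
 * C4IW_QP_ATTR_SQ_DB/C4IW_QP_ATTR_RQ_DB.  As the comment above notes, this
 * overload is only honoured on T4 adapters.
 */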
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)

	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)

	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;