/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
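/*
 * A CQE belongs to software only when its ownership bit matches the lap
 * parity of the requested index (the bit just above the index mask);
 * otherwise hardware still owns it and NULL is returned.  With 64-byte
 * CQEs the hardware-written half, including the owner bit, is the second
 * 32 bytes, hence the cqe + 1 adjustment below.
 */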
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
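/*
 * The kernel-owned CQ buffer is a single mlx4_buf whose pages are made
 * visible to the HCA through an MTT (memory translation table): allocate
 * the buffer, initialize an MTT sized for it, then write the page list.
 * Each failure step unwinds the ones before it.
 */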
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
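/*
 * CQ creation takes one of two paths: for a userspace CQ (udata != NULL)
 * the CQE ring and doorbell record live in user memory and are pinned and
 * mapped here, while for a kernel CQ the driver allocates both itself.
 * Either way the firmware ends up with an MTT describing the ring and a
 * DMA address for the doorbell record.
 */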
#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return -EINVAL;

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return -EINVAL;

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return 0;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	ib_umem_release(cq->umem);
	if (!udata)
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	return err;
}
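/*
 * CQ resize staging: the new ring is allocated up front (a kernel buffer
 * or pinned user memory) and parked in cq->resize_buf / cq->resize_umem
 * until the firmware resize completes, at which point the CQ is switched
 * over to it.
 */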
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}
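/*
 * Resize protocol: once the firmware accepts the resize (mlx4_cq_resize),
 * hardware keeps writing to the old ring until it posts a CQE with the
 * RESIZE opcode.  For user CQs the driver switches over immediately and
 * leaves the RESIZE CQE to userspace; for kernel CQs any entries still in
 * the old ring are copied across under cq->lock and the old buffer is
 * freed only after the switch.
 */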
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (udata) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (udata) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!udata)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	ib_umem_release(cq->resize_umem);
	cq->resize_umem = NULL;
out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}
void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&mcq->db);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}
	ib_umem_release(mcq->umem);
}
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
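/*
 * Translate a hardware error syndrome into the corresponding ib_wc
 * status.  Local QP operation errors also get the raw CQE dumped at
 * debug level, since they usually indicate a driver or application bug.
 */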
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}
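/*
 * The hardware checksum is trusted only for a plain IPv4 packet (no
 * fragments, no IP options, IPOK status set) carrying TCP or UDP, and
 * only when the computed checksum is the all-ones 0xffff.
 */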
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
				      MLX4_CQE_STATUS_IPV4F     |
				      MLX4_CQE_STATUS_IPV4OPT   |
				      MLX4_CQE_STATUS_IPV6      |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
			    MLX4_CQE_STATUS_IPOK))              &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
				      MLX4_CQE_STATUS_TCP))     &&
		checksum == cpu_to_be16(0xffff);
}
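/*
 * Under SR-IOV, traffic for proxy QPs (SMI/GSI) is tunneled through the
 * PF.  The real completion metadata travels in a mlx4_ib_proxy_sqp_hdr
 * prepended to the receive buffer, so the wc fields are filled from that
 * header rather than from the CQE itself.
 */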
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index	= be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp	= be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid	= be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl		= (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}
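/*
 * When the device is in internal error state the hardware will never
 * complete outstanding work requests, so completions are fabricated in
 * software: every WQE still queued on the SQ or RQ is reported as
 * IB_WC_WR_FLUSH_ERR.
 */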
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}
static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}
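/*
 * Poll a single CQE and fill in *wc.  Returns 0 when a completion was
 * consumed and -EAGAIN when the CQ is empty.  *cur_qp caches the QP of
 * the previous CQE so that back-to-back completions on the same QP skip
 * the QP table lookup.
 */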
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}
	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		tail	  = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}
	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}
		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
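/*
 * Arm the CQ by ringing its doorbell: MLX4_CQ_DB_REQ_NOT_SOL requests an
 * event on the next solicited completion only, MLX4_CQ_DB_REQ_NOT on any
 * next completion.
 */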
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
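/*
 * Remove all CQEs belonging to the given QP from the CQ, compacting the
 * remaining entries in place.  Callers must hold cq->lock;
 * mlx4_ib_cq_clean() below is the locking wrapper.
 */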
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}