/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
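
/*
 * Completion queue (CQ) support for the hns RoCE driver: allocation and
 * teardown of CQE buffers, hardware CQ contexts and doorbells, plus dispatch
 * of completion and asynchronous events to the RDMA core.
 */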

static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
        struct ib_cq *ibcq = &hr_cq->ib_cq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
                                 enum hns_roce_event event_type)
{
        struct hns_roce_dev *hr_dev;
        struct ib_event event;
        struct ib_cq *ibcq;

        ibcq = &hr_cq->ib_cq;
        hr_dev = to_hr_dev(ibcq->device);

        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
                dev_err(hr_dev->dev,
                        "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
                        event_type, hr_cq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device = ibcq->device;
                event.event = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}
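
/*
 * SW2HW_CQ hands a software-initialised CQ context (CQC) to hardware: the
 * context is staged in a DMA-able mailbox buffer and posted as a mailbox
 * command, with the CQN as the command's in_modifier.
 */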
static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
                                 HNS_ROCE_CMD_SW2HW_CQ,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                             struct hns_roce_mtt *hr_mtt,
                             struct hns_roce_uar *hr_uar,
                             struct hns_roce_cq *hr_cq, int vector)
{
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_hem_table *mtt_table;
        struct hns_roce_cq_table *cq_table;
        struct device *dev = hr_dev->dev;
        dma_addr_t dma_handle;
        u64 *mtts;
        int ret;

        cq_table = &hr_dev->cq_table;

        /* Get the physical address of cq buf */
        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                mtt_table = &hr_dev->mr_table.mtt_cqe_table;
        else
                mtt_table = &hr_dev->mr_table.mtt_table;
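
        /*
         * With multi-hop addressing enabled for CQEs, the page addresses of
         * the CQE ring live in a dedicated CQE MTT table; otherwise they sit
         * in the generic MTT table shared with WQE buffers. The lookup below
         * returns both the kernel mapping of the first MTT segment and its
         * DMA address for use in the CQ context.
         */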
        mtts = hns_roce_table_find(hr_dev, mtt_table,
                                   hr_mtt->first_seg, &dma_handle);
        if (!mtts) {
                dev_err(dev, "CQ alloc. Failed to find cq buf addr.\n");
                return -EINVAL;
        }

        if (vector >= hr_dev->caps.num_comp_vectors) {
                dev_err(dev, "CQ alloc. Invalid vector.\n");
                return -EINVAL;
        }
        hr_cq->vector = vector;

        ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
        if (ret == -1) {
                dev_err(dev, "CQ alloc. Failed to alloc index.\n");
                return -ENOMEM;
        }

        /* Get CQC memory HEM(Hardware Entry Memory) table */
        ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
        if (ret) {
                dev_err(dev, "CQ alloc. Failed to get context mem.\n");
                goto err_out;
        }

        ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq,
                              GFP_KERNEL));
        if (ret) {
                dev_err(dev, "CQ alloc failed xa_store.\n");
                goto err_put;
        }

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_xa;
        }

        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
                              nent, vector);

        /* Send mailbox to hw */
        ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        if (ret) {
                dev_err(dev, "CQ alloc. Failed to send cmd mailbox.\n");
                goto err_xa;
        }

        hr_cq->cons_index = 0;
        hr_cq->arm_sn = 1;
        hr_cq->uar = hr_uar;

        atomic_set(&hr_cq->refcount, 1);
        init_completion(&hr_cq->free);

        return 0;

err_xa:
        xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
        return ret;
}
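
/*
 * HW2SW_CQ is the inverse command, telling hardware to give up the CQ
 * context. A NULL mailbox is accepted; in that case no output buffer is
 * supplied and the op_modifier of 1 appears to ask hardware to discard the
 * context rather than write it back.
 */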
static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
                                 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = hr_dev->dev;
        int ret;

        ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
        if (ret)
                dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
                        hr_cq->cqn);

        xa_erase(&cq_table->array, hr_cq->cqn);
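
        /*
         * The refcount was initialised to 1 in hns_roce_cq_alloc() and event
         * handlers take a temporary reference in hns_roce_cq_event(); with
         * the xarray entry gone, drop the base reference and wait until the
         * last in-flight handler signals the completion.
         */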
        /* Waiting interrupt process procedure carried out */
        synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

        /* wait for all interrupt processed */
        if (atomic_dec_and_test(&hr_cq->refcount))
                complete(&hr_cq->free);
        wait_for_completion(&hr_cq->free);

        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
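
/*
 * For a user-space CQ the CQE ring lives in application memory: pin it with
 * ib_umem_get(), size an MTT for it, and write the page addresses into the
 * MTT so hardware can translate ring offsets to DMA addresses.
 */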
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
                                   struct ib_udata *udata,
                                   struct hns_roce_cq_buf *buf,
                                   struct ib_umem **umem, u64 buf_addr,
                                   int cqe)
{
        u32 page_shift;
        u32 npages;
        int ret;

        *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
        else
                buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
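
        /*
         * cqe_buf_pg_sz is an extra shift on top of PAGE_SHIFT: when it is
         * non-zero, hardware pages are larger than system pages, so the umem
         * page count is rounded up to whole hardware pages before sizing the
         * MTT.
         */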
        if (hr_dev->caps.cqe_buf_pg_sz) {
                npages = (ib_umem_page_count(*umem) +
                          (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
                         (1 << hr_dev->caps.cqe_buf_pg_sz);
                page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
                ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
                                        &buf->hr_mtt);
        } else {
                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
                                        PAGE_SHIFT, &buf->hr_mtt);
        }
        if (ret)
                goto err_buf;

        ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        ib_umem_release(*umem);
        return ret;
}
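
/*
 * Kernel-space CQs allocate their own CQE ring. The second argument of
 * hns_roce_buf_alloc() below reads like a max_direct hint: presumably a
 * buffer of up to two hardware pages is kept physically contiguous, while
 * larger rings fall back to a list of pages tracked by the MTT.
 */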
static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, u32 nent)
{
        u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
        int ret;

        ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                                 (1 << page_shift) * 2, &buf->hr_buf,
                                 page_shift);
        if (ret)
                goto out;

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
        else
                buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

        ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
                                buf->hr_buf.page_shift, &buf->hr_mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
out:
        return ret;
}

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, int cqe)
{
        hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
}
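
/*
 * Create-CQ entry point invoked by the RDMA core, which allocates the
 * ib_cq/hns_roce_cq container itself. The flow: validate and round up the
 * depth, set up the CQE ring and record doorbell (pinned from user memory
 * when udata is present, allocated in the kernel otherwise), then reserve a
 * CQN and program the CQ context through hns_roce_cq_alloc().
 */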
int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
                          const struct ib_cq_init_attr *attr,
                          struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct device *dev = hr_dev->dev;
        struct hns_roce_ib_create_cq ucmd;
        struct hns_roce_ib_create_cq_resp resp = {};
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        struct hns_roce_uar *uar = NULL;
        int vector = attr->comp_vector;
        int cq_entries = attr->cqe;
        int ret;
        struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);

        if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
                dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
                        cq_entries, hr_dev->caps.max_cqes);
                return -EINVAL;
        }

        if (hr_dev->caps.min_cqes)
                cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

        cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
        hr_cq->ib_cq.cqe = cq_entries - 1;
        spin_lock_init(&hr_cq->lock);

        if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "Failed to copy_from_udata.\n");
                        ret = -EFAULT;
                        goto err_cq;
                }

                /* Get user space address, write it into mtt table */
                ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
                                              &hr_cq->umem, ucmd.buf_addr,
                                              cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to get_cq_umem.\n");
                        goto err_cq;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp))) {
                        ret = hns_roce_db_map_user(context, udata,
                                                   ucmd.db_addr, &hr_cq->db);
                        if (ret) {
                                dev_err(dev, "cq record doorbell map failed!\n");
                                goto err_mtt;
                        }
                        hr_cq->db_en = 1;
                        resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
                }

                /* Get user space parameters */
                uar = &context->uar;
        } else {
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
                        if (ret)
                                goto err_cq;

                        hr_cq->set_ci_db = hr_cq->db.db_record;
                        *hr_cq->set_ci_db = 0;
                        hr_cq->db_en = 1;
                }

                /* Init mtt table and write buff address to mtt table */
                ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
                                               cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to alloc_cq_buf.\n");
                        goto err_db;
                }

                uar = &hr_dev->priv_uar;
                hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
                                 DB_REG_OFFSET * uar->index;
        }

        /* Allocate cq index, fill cq_context */
        ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
                                uar, hr_cq, vector);
        if (ret) {
                dev_err(dev, "Create CQ. Failed to cq_alloc.\n");
                goto err_dbmap;
        }

        /*
         * For a CQ created by kernel space, the tptr value should be
         * initialized to zero; for a CQ created by user space, setting tptr
         * to zero here would cause synchronization problems, so it is
         * initialized in user space instead.
         */
        if (!udata && hr_cq->tptr_addr)
                *hr_cq->tptr_addr = 0;

        /* Get created cq handler and carry out event */
        hr_cq->comp = hns_roce_ib_cq_comp;
        hr_cq->event = hns_roce_ib_cq_event;
        hr_cq->cq_depth = cq_entries;

        if (udata) {
                resp.cqn = hr_cq->cqn;
                ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (ret)
                        goto err_cqc;
        }

        return 0;

err_cqc:
        hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
        if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
            (udata->outlen >= sizeof(resp)))
                hns_roce_db_unmap_user(context, &hr_cq->db);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
        ib_umem_release(hr_cq->umem);
        if (!udata)
                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
                                        hr_cq->ib_cq.cqe);

err_db:
        if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_cq->db);

err_cq:
        return ret;
}
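
/*
 * Some hardware revisions install their own hw->destroy_cq hook; when it is
 * present it takes over the entire teardown, so the generic path below only
 * runs without it.
 */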
void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

        if (hr_dev->hw->destroy_cq) {
                hr_dev->hw->destroy_cq(ib_cq, udata);
                return;
        }

        hns_roce_free_cq(hr_dev, hr_cq);
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

        ib_umem_release(hr_cq->umem);
        if (udata) {
                if (hr_cq->db_en == 1)
                        hns_roce_db_unmap_user(
                                rdma_udata_to_drv_context(
                                        udata,
                                        struct hns_roce_ucontext,
                                        ibucontext),
                                &hr_cq->db);
        } else {
                /* Free the buff of stored cq */
                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                        hns_roce_free_db(hr_dev, &hr_cq->db);
        }
}
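
/*
 * Event queue handlers deliver events by CQ number. The lookups below mask
 * the CQN with (num_cqs - 1), which assumes num_cqs is a power of two so
 * that the mask yields the table index.
 */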
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_cq *cq;

        cq = xa_load(&hr_dev->cq_table.array,
                     cqn & (hr_dev->caps.num_cqs - 1));
        if (!cq) {
                dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
                return;
        }

        ++cq->arm_sn;
        cq->comp(cq);
}

void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = hr_dev->dev;
        struct hns_roce_cq *cq;

        cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);

        if (!cq) {
                dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        cq->event(cq, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}
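
/*
 * The CQ table pairs a bitmap allocator for CQ numbers (with the bottom
 * caps.reserved_cqs numbers held back) with an xarray that maps a CQN to
 * its live hns_roce_cq for the event paths above.
 */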
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

        xa_init(&cq_table->array);

        return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
                                    hr_dev->caps.num_cqs - 1,
                                    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}