/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"

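/* Dispatch a completion notification to the consumer's comp_handler. */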
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
        struct ib_cq *ibcq = &hr_cq->ib_cq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

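/*
 * Forward an asynchronous CQ error (invalid CQ ID, access error or
 * overflow) to the consumer as IB_EVENT_CQ_ERR; any other event type
 * is unexpected and only logged.
 */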
static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
                                 enum hns_roce_event event_type)
{
        struct hns_roce_dev *hr_dev;
        struct ib_event event;
        struct ib_cq *ibcq;

        ibcq = &hr_cq->ib_cq;
        hr_dev = to_hr_dev(ibcq->device);

        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
                dev_err(hr_dev->dev,
                        "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
                        event_type, hr_cq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device = ibcq->device;
                event.event = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

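/*
 * SW2HW_CQ: mailbox command that writes the CQ context to hardware,
 * handing ownership of the CQ from software to hardware.
 */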
static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
                            HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
}

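/*
 * Allocate a CQN and hardware CQ context for @hr_cq: look up the MTT
 * entries describing the CQ buffer, reserve a CQN from the bitmap, get
 * the CQC HEM memory, make the CQ reachable via the radix tree, and
 * finally program the context into hardware with a SW2HW_CQ mailbox
 * command. Each step is unwound in reverse order on failure.
 */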
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                             struct hns_roce_mtt *hr_mtt,
                             struct hns_roce_uar *hr_uar,
                             struct hns_roce_cq *hr_cq, int vector)
{
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_hem_table *mtt_table;
        struct hns_roce_cq_table *cq_table;
        struct device *dev = hr_dev->dev;
        dma_addr_t dma_handle;
        u64 *mtts;
        int ret;

        cq_table = &hr_dev->cq_table;

        /* Get the physical address of cq buf */
        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                mtt_table = &hr_dev->mr_table.mtt_cqe_table;
        else
                mtt_table = &hr_dev->mr_table.mtt_table;

        mtts = hns_roce_table_find(hr_dev, mtt_table,
                                   hr_mtt->first_seg, &dma_handle);
        if (!mtts) {
                dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
                return -EINVAL;
        }

        if (vector >= hr_dev->caps.num_comp_vectors) {
                dev_err(dev, "CQ alloc: invalid comp vector.\n");
                return -EINVAL;
        }
        hr_cq->vector = vector;

        ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
        if (ret == -1) {
                dev_err(dev, "CQ alloc: failed to alloc CQ index.\n");
                return -ENOMEM;
        }

        /* Get CQC memory HEM(Hardware Entry Memory) table */
        ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
        if (ret) {
                dev_err(dev, "CQ alloc: failed to get context mem.\n");
                goto err_out;
        }

        /*
         * Insert the CQ into the radix tree, which maps the CQN (a long
         * integer key) to the hns_roce_cq pointer.
         */
        spin_lock_irq(&cq_table->lock);
        ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
        spin_unlock_irq(&cq_table->lock);
        if (ret) {
                dev_err(dev, "CQ alloc: failed to insert into radix tree.\n");
                goto err_put;
        }

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_radix;
        }

        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
                              nent, vector);

        /* Send mailbox to hw */
        ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        if (ret) {
                dev_err(dev, "CQ alloc: SW2HW_CQ mailbox cmd failed.\n");
                goto err_radix;
        }

        hr_cq->cons_index = 0;
        hr_cq->arm_sn = 1;
        hr_cq->uar = hr_uar;

        atomic_set(&hr_cq->refcount, 1);
        init_completion(&hr_cq->free);

        return 0;

err_radix:
        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, hr_cq->cqn);
        spin_unlock_irq(&cq_table->lock);

err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
        return ret;
}

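/*
 * HW2SW_CQ: mailbox command that returns CQ ownership from hardware to
 * software; callers pass a NULL mailbox when the context contents are
 * not needed.
 */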
static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
                                 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

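/*
 * Tear down a CQ: take it back from hardware, wait for outstanding
 * references and in-flight interrupts to drain, then release the radix
 * tree entry, HEM memory and CQN.
 */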
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = hr_dev->dev;
        int ret;

        ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
        if (ret)
                dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
                        hr_cq->cqn);

        /* Wait until any in-flight interrupt handler has finished */
        synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

        /* Drop our reference and wait until all others are released */
        if (atomic_dec_and_test(&hr_cq->refcount))
                complete(&hr_cq->free);
        wait_for_completion(&hr_cq->free);

        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, hr_cq->cqn);
        spin_unlock_irq(&cq_table->lock);

        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_free_cq);

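/*
 * Pin the userspace CQ buffer at @buf_addr, build an MTT for it, and
 * write the page addresses into the MTT so hardware can reach the
 * buffer. A nonzero cqe_buf_pg_sz selects a CQE buffer page size
 * larger than PAGE_SIZE, so the umem page count is scaled down
 * accordingly.
 */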
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
                                   struct ib_ucontext *context,
                                   struct hns_roce_cq_buf *buf,
                                   struct ib_umem **umem, u64 buf_addr, int cqe)
{
        int ret;
        u32 page_shift;
        u32 npages;

        *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
        else
                buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

        if (hr_dev->caps.cqe_buf_pg_sz) {
                npages = (ib_umem_page_count(*umem) +
                        (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
                        (1 << hr_dev->caps.cqe_buf_pg_sz);
                page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
                ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
                                        &buf->hr_mtt);
        } else {
                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
                                (*umem)->page_shift,
                                &buf->hr_mtt);
        }
        if (ret)
                goto err_buf;

        ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        ib_umem_release(*umem);
        return ret;
}

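/*
 * Kernel-space counterpart of hns_roce_ib_get_cq_umem(): allocate the
 * CQ buffer in the kernel and write its pages into a new MTT.
 */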
static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, u32 nent)
{
        int ret;
        u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;

        ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                                 (1 << page_shift) * 2, &buf->hr_buf,
                                 page_shift);
        if (ret)
                goto out;

        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
        else
                buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

        ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
                                buf->hr_buf.page_shift, &buf->hr_mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
out:
        return ret;
}

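/* Free a kernel-allocated CQ buffer; @cqe is ib_cq.cqe, i.e. depth - 1. */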
static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, int cqe)
{
        hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
}

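/*
 * Verbs create_cq entry point. The CQ buffer comes either from
 * userspace (via udata) or from the kernel; either way its pages end
 * up in an MTT, after which hns_roce_cq_alloc() programs the hardware
 * context. Record doorbells are mapped (user) or allocated (kernel)
 * when the HNS_ROCE_CAP_FLAG_RECORD_DB capability is set.
 */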
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct device *dev = hr_dev->dev;
        struct hns_roce_ib_create_cq ucmd;
        struct hns_roce_ib_create_cq_resp resp = {};
        struct hns_roce_cq *hr_cq = NULL;
        struct hns_roce_uar *uar = NULL;
        int vector = attr->comp_vector;
        int cq_entries = attr->cqe;
        int ret;

        if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
                dev_err(dev, "Create CQ failed: entries=%d, max=%d\n",
                        cq_entries, hr_dev->caps.max_cqes);
                return ERR_PTR(-EINVAL);
        }

        hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
        if (!hr_cq)
                return ERR_PTR(-ENOMEM);

        if (hr_dev->caps.min_cqes)
                cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

        cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
        hr_cq->ib_cq.cqe = cq_entries - 1;
        spin_lock_init(&hr_cq->lock);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "Failed to copy_from_udata.\n");
                        ret = -EFAULT;
                        goto err_cq;
                }

                /* Get user space address, write it into mtt table */
                ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
                                              &hr_cq->umem, ucmd.buf_addr,
                                              cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to get_cq_umem.\n");
                        goto err_cq;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp))) {
                        ret = hns_roce_db_map_user(to_hr_ucontext(context),
                                                   ucmd.db_addr, &hr_cq->db);
                        if (ret) {
                                dev_err(dev, "cq record doorbell map failed!\n");
                                goto err_mtt;
                        }
                        hr_cq->db_en = 1;
                        resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
                }

                /* Get user space parameters */
                uar = &to_hr_ucontext(context)->uar;
        } else {
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
                        if (ret)
                                goto err_cq;

                        hr_cq->set_ci_db = hr_cq->db.db_record;
                        *hr_cq->set_ci_db = 0;
                        hr_cq->db_en = 1;
                }

                /* Init mtt table and write buf address to mtt table */
                ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
                                               cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to alloc_cq_buf.\n");
                        goto err_db;
                }

                uar = &hr_dev->priv_uar;
                hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
                                DB_REG_OFFSET * uar->index;
        }

        /* Allocate cq index, fill cq_context */
        ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
                                hr_cq, vector);
        if (ret) {
                dev_err(dev, "Create CQ failed: cq_alloc error.\n");
                goto err_dbmap;
        }

        /*
         * For a CQ created by kernel space, the tptr value should be
         * initialized to zero; for a CQ created by user space, setting
         * tptr to zero here would cause synchronization problems, so it
         * is initialized in user space instead.
         */
        if (!context && hr_cq->tptr_addr)
                *hr_cq->tptr_addr = 0;

        /* Set up the completion and async event handlers */
        hr_cq->comp = hns_roce_ib_cq_comp;
        hr_cq->event = hns_roce_ib_cq_event;
        hr_cq->cq_depth = cq_entries;

        if (context) {
                resp.cqn = hr_cq->cqn;
                ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (ret)
                        goto err_cqc;
        }

        return &hr_cq->ib_cq;

err_cqc:
        hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
        if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
            (udata->outlen >= sizeof(resp)))
                hns_roce_db_unmap_user(to_hr_ucontext(context),
                                       &hr_cq->db);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
        if (context)
                ib_umem_release(hr_cq->umem);
        else
                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
                                        hr_cq->ib_cq.cqe);

err_db:
        if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_cq->db);

err_cq:
        kfree(hr_cq);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);

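/*
 * Verbs destroy_cq entry point. Some hardware revisions provide their
 * own destroy_cq hook; otherwise the generic teardown below runs.
 */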
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        int ret = 0;

        if (hr_dev->hw->destroy_cq) {
                ret = hr_dev->hw->destroy_cq(ib_cq);
        } else {
                hns_roce_free_cq(hr_dev, hr_cq);
                hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

                if (ib_cq->uobject) {
                        ib_umem_release(hr_cq->umem);

                        if (hr_cq->db_en == 1)
                                hns_roce_db_unmap_user(
                                        to_hr_ucontext(ib_cq->uobject->context),
                                        &hr_cq->db);
                } else {
                        /* Free the kernel-allocated CQ buffer */
                        hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
                                                ib_cq->cqe);
                        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                                hns_roce_free_db(hr_dev, &hr_cq->db);
                }

                kfree(hr_cq);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);

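/*
 * Completion event handler, invoked from EQ interrupt handling: look
 * up the CQ by CQN, bump the arm sequence number and invoke its comp
 * callback.
 */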
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_cq *cq;

        cq = radix_tree_lookup(&hr_dev->cq_table.tree,
                               cqn & (hr_dev->caps.num_cqs - 1));
        if (!cq) {
                dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
                return;
        }

        ++cq->arm_sn;
        cq->comp(cq);
}
EXPORT_SYMBOL_GPL(hns_roce_cq_completion);

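/*
 * Asynchronous (error) event handler: look up the CQ by CQN and
 * forward the event, holding a reference so the CQ cannot be freed
 * under us.
 */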
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = hr_dev->dev;
        struct hns_roce_cq *cq;

        cq = radix_tree_lookup(&cq_table->tree,
                               cqn & (hr_dev->caps.num_cqs - 1));
        if (!cq) {
                dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        atomic_inc(&cq->refcount);

        cq->event(cq, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_cq_event);

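/* Init the CQN bitmap and the CQN -> CQ radix tree at probe time. */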
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
                                    hr_dev->caps.num_cqs - 1,
                                    hr_dev->caps.reserved_cqs, 0);
}

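/* Release the CQN bitmap on device teardown. */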
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}