// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail_rcu(node, head);
        spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(lock, flags);
}
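
/*
 * Writers serialize on the per-device spinlock while using the _rcu list
 * primitives, so readers (presumably in the AE/ENET drivers) can walk
 * handle_list under rcu_read_lock() without taking the lock.
 */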

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        unsigned int order = hnae_page_order(ring);
        struct page *p = dev_alloc_pages(order);

        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf = page_address(p);
        cb->length = hnae_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (unlikely(!cb->priv))
                return;

        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);

        cb->priv = NULL;
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
                return -EIO;

        return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}
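
/*
 * The unmap path mirrors how each descriptor type was mapped: pages are
 * mapped with dma_map_page() above, while SKB buffers are presumably mapped
 * with dma_map_single() by the ring user (the ENET driver), so each must be
 * released with the matching unmap variant.
 */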

static struct hnae_buf_ops hnae_bops = {
        .alloc_buffer = hnae_alloc_buffer,
        .free_buffer = hnae_free_buffer,
        .map_buffer = hnae_map_buffer,
        .unmap_buffer = hnae_unmap_buffer,
};
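
/*
 * hnae_bops is the default vtable; hnae_get_handle() falls back to it when
 * the caller passes bops == NULL.  A minimal sketch of a caller-supplied
 * override (my_alloc/my_free/my_map/my_unmap are hypothetical names, not
 * part of this framework):
 *
 *      static struct hnae_buf_ops my_bops = {
 *              .alloc_buffer = my_alloc,
 *              .free_buffer  = my_free,
 *              .map_buffer   = my_map,
 *              .unmap_buffer = my_unmap,
 *      };
 *
 *      handle = hnae_get_handle(owner_dev, fwnode, port_id, &my_bops);
 */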

static int __ae_match(struct device *dev, const void *data)
{
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

        if (dev_of_node(hdev->dev))
                return (data == &hdev->dev->of_node->fwnode);
        else if (is_acpi_node(hdev->dev->fwnode))
                return (data == hdev->dev->fwnode);

        dev_err(dev, "__ae_match cannot read cfg data from OF or ACPI\n");
        return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
        struct device *dev;

        WARN_ON(!fwnode);

        dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

        return dev ? cls_to_ae_dev(dev) : NULL;
}
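
/*
 * class_find_device() takes a reference on the matched device; that is why
 * hnae_get_handle() drops it with put_device() on its error paths and
 * hnae_put_handle() drops it on release.
 */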

static void hnae_free_buffers(struct hnae_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for the raw packet buffers and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hnae_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hnae_free_buffer_detach(ring, j);
        return ret;
}

/* unmap and free the descriptor array */
static void hnae_free_desc(struct hnae_ring *ring)
{
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
        ring->desc_dma_addr = 0;
        kfree(ring->desc);
        ring->desc = NULL;
}

/* allocate the descriptor array, without buffers attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
                ring->desc, size, ring_to_dma_dir(ring));
        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}
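
/*
 * Note the descriptor array is kzalloc()'d and streaming-mapped with
 * dma_map_single() rather than allocated from the coherent DMA pool; no
 * dma_sync_*() calls appear in this file, so the scheme presumably relies
 * on the HiSilicon SoCs being cache-coherent.
 */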

/* tear down the ring; for an rx ring this also frees its buffers */
static void hnae_fini_ring(struct hnae_ring *ring)
{
        if (is_rx_ring(ring))
                hnae_free_buffers(ring);

        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

/* init the ring; an rx ring also gets its buffers allocated and attached */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->q = q;
        ring->flags = flags;
        ring->coal_param = q->handle->coal_param;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

        /* no matter whether tx or rx ring, the ntc and ntu start from 0 */
        assert(ring->next_to_use == 0);
        assert(ring->next_to_clean == 0);

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                        GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hnae_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (is_rx_ring(ring)) {
                ret = hnae_alloc_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hnae_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
                           struct hnae_ae_dev *dev)
{
        int ret;

        q->dev = dev;
        q->handle = h;

        ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
        if (ret)
                goto out;

        ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
        if (ret)
                goto out_with_tx_ring;

        if (dev->ops->init_queue)
                dev->ops->init_queue(q);

        return 0;

out_with_tx_ring:
        hnae_fini_ring(&q->tx_ring);
out:
        return ret;
}
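
/*
 * RINGF_DIR is the direction flag: it is OR'd into the tx ring's flags and
 * masked off the rx ring's, and it is what the is_rx_ring()/is_tx_ring()
 * helpers (from hnae.h) test throughout this file.
 */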

static void hnae_fini_queue(struct hnae_queue *q)
{
        if (q->dev->ops->fini_queue)
                q->dev->ops->fini_queue(q);

        hnae_fini_ring(&q->tx_ring);
        hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - notifier chain head for AE registration events
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
        if (raw_notifier_chain_unregister(&ae_chain, nb))
                pr_err("hnae: notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
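
/*
 * A minimal sketch of consumer-side use, assuming a driver that wants to
 * (re)probe when a new AE registers; my_ae_event and my_ae_notifier are
 * hypothetical names, not part of this framework:
 *
 *      static int my_ae_event(struct notifier_block *nb,
 *                             unsigned long action, void *data)
 *      {
 *              if (action == HNAE_AE_REGISTER)
 *                      ;       ...probe against the newly arrived AE...
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_ae_notifier = {
 *              .notifier_call = my_ae_event,
 *      };
 *
 *      hnae_register_notifier(&my_ae_notifier);
 */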

int hnae_reinit_handle(struct hnae_handle *handle)
{
        int i, j;
        int ret;

        for (i = 0; i < handle->q_num; i++) /* free the rings */
                hnae_fini_queue(handle->qs[i]);

        if (handle->dev->ops->reset)
                handle->dev->ops->reset(handle);

        for (i = 0; i < handle->q_num; i++) { /* reinit the rings */
                ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
                if (ret)
                        goto out_when_init_queue;
        }
        return 0;
out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);
        return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/**
 * hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device providing the handle
 * @port_id: the id of the port to bind the handle to
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * Return: handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
                                    const struct fwnode_handle *fwnode,
                                    u32 port_id,
                                    struct hnae_buf_ops *bops)
{
        struct hnae_ae_dev *dev;
        struct hnae_handle *handle;
        int i, j;
        int ret;

        dev = find_ae(fwnode);
        if (!dev)
                return ERR_PTR(-ENODEV);

        handle = dev->ops->get_handle(dev, port_id);
        if (IS_ERR(handle)) {
                put_device(&dev->cls_dev);
                return handle;
        }

        handle->dev = dev;
        handle->owner_dev = owner_dev;
        handle->bops = bops ? bops : &hnae_bops;
        handle->eport_id = port_id;

        for (i = 0; i < handle->q_num; i++) {
                ret = hnae_init_queue(handle, handle->qs[i], dev);
                if (ret)
                        goto out_when_init_queue;
        }

        __module_get(dev->owner);

        hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

        return handle;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        put_device(&dev->cls_dev);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);
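
/*
 * Sketch of a typical acquisition from a consumer's probe path (names and
 * error handling are illustrative; in-tree, the hns_enet driver is the
 * caller):
 *
 *      handle = hnae_get_handle(&pdev->dev, ae_fwnode, port_id, NULL);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ...
 *      hnae_put_handle(handle);        ...on teardown...
 */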

void hnae_put_handle(struct hnae_handle *h)
{
        struct hnae_ae_dev *dev = h->dev;
        int i;

        for (i = 0; i < h->q_num; i++)
                hnae_fini_queue(h->qs[i]);

        if (h->dev->ops->reset)
                h->dev->ops->reset(h);

        hnae_list_del(&dev->lock, &h->node);

        if (dev->ops->put_handle)
                dev->ops->put_handle(h);

        module_put(dev->owner);

        put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
        /* nothing to free: the hnae_ae_dev embedding cls_dev is owned and
         * freed by the AE driver that registered it
         */
}

/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
        static atomic_t id = ATOMIC_INIT(-1);
        int ret;

        if (!hdev->dev)
                return -ENODEV;

        if (!hdev->ops || !hdev->ops->get_handle ||
            !hdev->ops->toggle_ring_irq ||
            !hdev->ops->get_status || !hdev->ops->adjust_link)
                return -EINVAL;

        hdev->owner = owner;
        hdev->id = (int)atomic_inc_return(&id);
        hdev->cls_dev.parent = hdev->dev;
        hdev->cls_dev.class = hnae_class;
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
        if (ret)
                return ret;

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&hdev->handle_list);
        spin_lock_init(&hdev->lock);

        ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
        if (ret)
                dev_dbg(hdev->dev,
                        "has no notifier for AE: %s\n", hdev->name);

        return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
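
/*
 * Provider-side sketch, assuming an AE driver (in-tree, the DSAF code) that
 * fills in its device and the mandatory ops checked above; my_ae_ops and
 * the my_* callbacks are hypothetical names:
 *
 *      static struct hnae_ae_ops my_ae_ops = {
 *              .get_handle      = my_get_handle,
 *              .toggle_ring_irq = my_toggle_ring_irq,
 *              .get_status      = my_get_status,
 *              .adjust_link     = my_adjust_link,
 *      };
 *
 *      hdev->dev = &pdev->dev;
 *      hdev->ops = &my_ae_ops;
 *      ret = hnae_ae_register(hdev, THIS_MODULE);
 */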

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the hnae ae engine device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
        device_unregister(&hdev->cls_dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
        hnae_class = class_create(THIS_MODULE, "hnae");
        return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
        class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);
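
/*
 * Registered at subsys_initcall level rather than module_init, presumably
 * so that hnae_class already exists when the AE and ENET drivers (ordinary
 * device-level initcalls when built in) come up and register against it.
 */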

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */