// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#define pr_fmt(fmt)     "NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

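/* Record a neutral map (one shared between the host and the device, e.g.
 * a perf event array) used by an offloaded program.  Records are keyed by
 * map ID, so the FW can refer to maps without knowing host pointers.
 */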
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                   struct bpf_map *map)
{
        struct nfp_bpf_neutral_map *record;
        int err;

        /* Reuse path - another offloaded program is already tracking this map. */
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
                                        nfp_bpf_maps_neutral_params);
        if (record) {
                nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
                record->count++;
                return 0;
        }

        /* Grab a single ref to the map for our record.  The prog destroy ndo
         * happens after free_used_maps().
         */
        map = bpf_map_inc(map, false);
        if (IS_ERR(map))
                return PTR_ERR(map);

        record = kmalloc(sizeof(*record), GFP_KERNEL);
        if (!record) {
                err = -ENOMEM;
                goto err_map_put;
        }

        record->ptr = map;
        record->map_id = map->id;
        record->count = 1;

        err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
                                     nfp_bpf_maps_neutral_params);
        if (err)
                goto err_free_rec;

        nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

        return 0;

err_free_rec:
        kfree(record);
err_map_put:
        bpf_map_put(map);
        return err;
}

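/* Drop this program's references to its recorded neutral maps.  Records
 * whose refcount hits zero are unhashed, and their map references are
 * only released after an RCU grace period, since the event path looks
 * records up under rcu_read_lock().
 */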
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
        bool freed = false;
        int i;

        for (i = 0; i < nfp_prog->map_records_cnt; i++) {
                if (--nfp_prog->map_records[i]->count) {
                        nfp_prog->map_records[i] = NULL;
                        continue;
                }

                WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
                                               &nfp_prog->map_records[i]->l,
                                               nfp_bpf_maps_neutral_params));
                freed = true;
        }

        if (freed) {
                synchronize_rcu();

                for (i = 0; i < nfp_prog->map_records_cnt; i++)
                        if (nfp_prog->map_records[i]) {
                                bpf_map_put(nfp_prog->map_records[i]->ptr);
                                kfree(nfp_prog->map_records[i]);
                        }
        }

        kfree(nfp_prog->map_records);
        nfp_prog->map_records = NULL;
        nfp_prog->map_records_cnt = 0;
}

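/* Count and record all offload-neutral maps used by the program.  Failure
 * part way through undoes the partial state via nfp_map_ptrs_forget().
 */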
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                    struct bpf_prog *prog)
{
        int i, cnt, err;

        /* Quickly count the maps we will have to remember */
        cnt = 0;
        for (i = 0; i < prog->aux->used_map_cnt; i++)
                if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
                        cnt++;
        if (!cnt)
                return 0;

        nfp_prog->map_records = kmalloc_array(cnt,
                                              sizeof(nfp_prog->map_records[0]),
                                              GFP_KERNEL);
        if (!nfp_prog->map_records)
                return -ENOMEM;

        for (i = 0; i < prog->aux->used_map_cnt; i++)
                if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
                        err = nfp_map_ptr_record(bpf, nfp_prog,
                                                 prog->aux->used_maps[i]);
                        if (err) {
                                nfp_map_ptrs_forget(bpf, nfp_prog);
                                return err;
                        }
                }
        WARN_ON(cnt != nfp_prog->map_records_cnt);

        return 0;
}

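/* Allocate per-instruction metadata for all instructions and seed the
 * fields the JIT's preparation pass works from.
 */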
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
                 unsigned int cnt)
{
        struct nfp_insn_meta *meta;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                meta = kzalloc(sizeof(*meta), GFP_KERNEL);
                if (!meta)
                        return -ENOMEM;

                meta->insn = prog[i];
                meta->n = i;
                if (is_mbpf_alu(meta)) {
                        meta->umin_src = U64_MAX;
                        meta->umin_dst = U64_MAX;
                }

                list_add_tail(&meta->l, &nfp_prog->insns);
        }
        nfp_prog->n_insns = cnt;

        nfp_bpf_jit_prepare(nfp_prog);

        return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        kfree(nfp_prog->subprog);

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}

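/* ->prepare callback of struct bpf_prog_offload_ops: allocate the
 * driver-private program state before host verification begins.
 */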
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog;
        int ret;

        nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
        if (!nfp_prog)
                return -ENOMEM;
        prog->aux->offload->dev_priv = nfp_prog;

        INIT_LIST_HEAD(&nfp_prog->insns);
        nfp_prog->type = prog->type;
        nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

        ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
        if (ret)
                goto err_free;

        nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

        return 0;

err_free:
        nfp_prog_free(nfp_prog);

        return ret;
}

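/* ->translate callback: JIT the verified program into NFP code, with the
 * image buffer sized to the FW's advertised maximum, and record the
 * neutral maps the program uses.
 */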
static int nfp_bpf_translate(struct bpf_prog *prog)
{
        struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int max_instr;
        int err;

        /* We depend on dead code elimination succeeding */
        if (prog->aux->offload->opt_failed)
                return -EINVAL;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

        nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
        if (!nfp_prog->prog)
                return -ENOMEM;

        err = nfp_bpf_jit(nfp_prog);
        if (err)
                return err;

        prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
        prog->aux->offload->jited_image = nfp_prog->prog;

        return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

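/* ->destroy callback: release the JITed image, the map records and all
 * per-program driver state.
 */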
static void nfp_bpf_destroy(struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

        kvfree(nfp_prog->prog);
        nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
        nfp_prog_free(nfp_prog);
}

/* The atomic engine requires values to be in big endian, so we need to
 * byte swap the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
                        word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark the value as unsafely initialized in case it later becomes atomic
 * and we didn't byte swap something which is not byte-swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
                    word[i] != (__force u32)cpu_to_be32(word[i]))
                        nfp_map->use_map[i].non_zero_update = 1;
}

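/* Map access wrappers over the control message primitives, converting
 * atomic counter words between host byte order and the big endian layout
 * the FW's atomic engine expects.
 */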
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value)
{
        int err;

        err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
        if (err)
                return err;

        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value, u64 flags)
{
        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
        return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
                         void *key, void *next_key)
{
        if (!key)
                return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
        return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;
        return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
        .map_get_next_key       = nfp_bpf_map_get_next_key,
        .map_lookup_elem        = nfp_bpf_map_lookup_entry,
        .map_update_elem        = nfp_bpf_map_update_entry,
        .map_delete_elem        = nfp_bpf_map_delete_elem,
};

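/* Validate the map parameters against the capabilities the FW advertises
 * (supported types, map and element budgets, key/value sizes) and
 * allocate the map on the device over the control channel.
 */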
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map;
        unsigned int use_map_size;
        long long int res;

        if (!bpf->maps.types)
                return -EOPNOTSUPP;

        if (offmap->map.map_flags ||
            offmap->map.numa_node != NUMA_NO_NODE) {
                pr_info("map flags are not supported\n");
                return -EINVAL;
        }

        if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
                pr_info("map type not supported\n");
                return -EOPNOTSUPP;
        }
        if (bpf->maps.max_maps == bpf->maps_in_use) {
                pr_info("too many maps for a device\n");
                return -ENOMEM;
        }
        if (bpf->maps.max_elems - bpf->map_elems_in_use <
            offmap->map.max_entries) {
                pr_info("map with too many elements: %u, left: %u\n",
                        offmap->map.max_entries,
                        bpf->maps.max_elems - bpf->map_elems_in_use);
                return -ENOMEM;
        }

        if (round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
                pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
                        round_up(offmap->map.key_size, 8) +
                        round_up(offmap->map.value_size, 8),
                        bpf->maps.max_elem_sz);
                return -ENOMEM;
        }
        if (offmap->map.key_size > bpf->maps.max_key_sz) {
                pr_info("map key size %u, FW max is %u\n",
                        offmap->map.key_size, bpf->maps.max_key_sz);
                return -ENOMEM;
        }
        if (offmap->map.value_size > bpf->maps.max_val_sz) {
                pr_info("map value size %u, FW max is %u\n",
                        offmap->map.value_size, bpf->maps.max_val_sz);
                return -ENOMEM;
        }

        use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
                       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

        nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
        if (!nfp_map)
                return -ENOMEM;

        offmap->dev_priv = nfp_map;
        nfp_map->offmap = offmap;
        nfp_map->bpf = bpf;

        res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
        if (res < 0) {
                kfree(nfp_map);
                return res;
        }

        nfp_map->tid = res;
        offmap->dev_ops = &nfp_bpf_map_ops;
        bpf->maps_in_use++;
        bpf->map_elems_in_use += offmap->map.max_entries;
        list_add_tail(&nfp_map->l, &bpf->map_list);

        return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;

        nfp_bpf_ctrl_free_map(bpf, nfp_map);
        list_del_init(&nfp_map->l);
        bpf->map_elems_in_use -= offmap->map.max_entries;
        bpf->maps_in_use--;
        kfree(nfp_map);

        return 0;
}

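/* ndo_bpf entry point for offloaded map commands forwarded by the core
 * NFP driver.
 */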
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case BPF_OFFLOAD_MAP_ALLOC:
                return nfp_bpf_map_alloc(app->priv, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                return nfp_bpf_map_free(app->priv, bpf->offmap);
        default:
                return -EINVAL;
        }
}

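/* Copy callback for bpf_event_output(); the event data already sits in
 * host memory, so a plain memcpy() will do.
 */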
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
                        unsigned long off, unsigned long len)
{
        memcpy(dst, src + off, len);
        return 0;
}

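/* Handle a perf event control message from the FW: validate the message
 * framing and ABI version, look up the host map by ID under RCU, and
 * forward the sample to the perf ring via bpf_event_output().
 */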
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
                         unsigned int len)
{
        struct cmsg_bpf_event *cbe = (void *)data;
        struct nfp_bpf_neutral_map *record;
        u32 pkt_size, data_size, map_id;
        u64 map_id_full;

        if (len < sizeof(struct cmsg_bpf_event))
                return -EINVAL;

        pkt_size = be32_to_cpu(cbe->pkt_size);
        data_size = be32_to_cpu(cbe->data_size);
        map_id_full = be64_to_cpu(cbe->map_ptr);
        map_id = map_id_full;

        if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
                return -EINVAL;
        if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
                return -EINVAL;

        rcu_read_lock();
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
                                        nfp_bpf_maps_neutral_params);
        if (!record || map_id_full > U32_MAX) {
                rcu_read_unlock();
                cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
                          map_id_full, map_id_full);
                return -EINVAL;
        }

        bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
                         &cbe->data[round_up(pkt_size, 4)], data_size,
                         cbe->data, pkt_size, nfp_bpf_perf_event_copy);
        rcu_read_unlock();

        return 0;
}

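/* Check the program against per-vNIC FW limits (packet access window,
 * stack depth, program length), relocate it for this vNIC, DMA-map the
 * image and ask the FW to load it.
 */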
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 struct netlink_ext_ack *extack)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
        dma_addr_t dma_addr;
        void *img;
        int err;

        fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
        if (fw_mtu < pkt_off) {
                NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
                return -EOPNOTSUPP;
        }

        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        if (nfp_prog->stack_size > max_stack) {
                NL_SET_ERR_MSG_MOD(extack, "stack too large");
                return -EOPNOTSUPP;
        }

        max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        if (nfp_prog->prog_len > max_prog_len) {
                NL_SET_ERR_MSG_MOD(extack, "program too long");
                return -EOPNOTSUPP;
        }

        img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
        if (IS_ERR(img))
                return PTR_ERR(img);

        dma_addr = dma_map_single(nn->dp.dev, img,
                                  nfp_prog->prog_len * sizeof(u64),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(nn->dp.dev, dma_addr)) {
                kfree(img);
                return -ENOMEM;
        }

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while loading BPF");

        dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
                         DMA_TO_DEVICE);
        kfree(img);

        return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
        int err;

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

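/* Top-level program replace handler: enforce offload device matching and
 * the FW's live reload capability, then load and start or stop the BPF
 * datapath as appropriate.
 */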
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack)
{
        int err;

        if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
                return -EINVAL;

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "FW does not support live reload");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog, extack);
        if (err)
                return err;

        if (!old_prog)
                nfp_net_bpf_start(nn, extack);

        return 0;
}

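/* Callbacks invoked by the kernel's BPF offload infrastructure. */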
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
        .insn_hook      = nfp_verify_insn,
        .finalize       = nfp_bpf_finalize,
        .replace_insn   = nfp_bpf_opt_replace_insn,
        .remove_insns   = nfp_bpf_opt_remove_insns,
        .prepare        = nfp_bpf_verifier_prep,
        .translate      = nfp_bpf_translate,
        .destroy        = nfp_bpf_destroy,
};