Linux-libre 4.14.82-gnu
librecmc/linux-libre.git: net/sched/cls_bpf.c
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256
#define CLS_BPF_SUPPORTED_GEN_FLAGS             \
        (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        union {
                struct work_struct work;
                struct rcu_head rcu;
        };
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FLAGS_GEN]     = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

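/* Map the return code of a direct-action (exts_integrated) program to a
 * valid TC verdict; anything the classifier does not recognize falls back
 * to TC_ACT_UNSPEC so the next filter in the chain gets a chance.
 */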
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_TRAP:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

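/* Main classification path: walk the RCU-protected list of attached
 * programs and run each one on the skb. On ingress the MAC header is
 * pushed back first so the program sees the full frame. The return value
 * is interpreted either as a TC action (direct-action mode) or as a
 * classid / fall-through for the classic exts path.
 */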
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (tc_skip_sw(prog->gen_flags)) {
                        filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
                } else if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class   = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class   = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}

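/* Classic BPF programs keep their sock_filter ops around for dumping;
 * eBPF programs attached by fd do not, so a NULL bpf_ops means eBPF.
 */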
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

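/* Issue a single offload command (add/replace/destroy/stats) to the
 * device via ndo_setup_tc() and mark the filter as in-hardware when an
 * add or replace succeeds.
 */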
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               enum tc_clsbpf_command cmd)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_bpf_offload cls_bpf = {};
        int err;

        tc_cls_common_offload_init(&cls_bpf.common, tp);
        cls_bpf.command = cmd;
        cls_bpf.exts = &prog->exts;
        cls_bpf.prog = prog->filter;
        cls_bpf.name = prog->bpf_name;
        cls_bpf.exts_integrated = prog->exts_integrated;
        cls_bpf.gen_flags = prog->gen_flags;

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
        if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
                prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

        return err;
}

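/* Decide how to (re)program the hardware when a filter is added or
 * replaced: replace an existing offloaded program, tear it down if the
 * new one cannot be offloaded, or add a fresh one. Offload errors are
 * only fatal when the software path was excluded via skip_sw.
 */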
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct cls_bpf_prog *obj = prog;
        enum tc_clsbpf_command cmd;
        bool skip_sw;
        int ret;

        skip_sw = tc_skip_sw(prog->gen_flags) ||
                (oldprog && tc_skip_sw(oldprog->gen_flags));

        if (oldprog && oldprog->offloaded) {
                if (tc_should_offload(dev, prog->gen_flags)) {
                        cmd = TC_CLSBPF_REPLACE;
                } else if (!tc_skip_sw(prog->gen_flags)) {
                        obj = oldprog;
                        cmd = TC_CLSBPF_DESTROY;
                } else {
                        return -EINVAL;
                }
        } else {
                if (!tc_should_offload(dev, prog->gen_flags))
                        return skip_sw ? -EINVAL : 0;
                cmd = TC_CLSBPF_ADD;
        }

        ret = cls_bpf_offload_cmd(tp, obj, cmd);
        if (ret)
                return skip_sw ? ret : 0;

        obj->offloaded = true;
        if (oldprog)
                oldprog->offloaded = false;

        return 0;
}

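/* Remove a filter from the hardware when it is being deleted from the
 * software tables.
 */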
static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog)
{
        int err;

        if (!prog->offloaded)
                return;

        err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
        if (err) {
                pr_err("Stopping hardware offload failed: %d\n", err);
                return;
        }

        prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
{
        if (!prog->offloaded)
                return;

        cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

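/* Release whatever the filter holds on to: the (e)BPF program itself
 * plus the copied classic ops and the user-supplied name, if any.
 */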
static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);
        tcf_exts_put_net(&prog->exts);

        cls_bpf_free_parms(prog);
        kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
        struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

        rtnl_lock();
        __cls_bpf_delete_prog(prog);
        rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
        tcf_queue_work(&prog->work);
}

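/* Unlink a filter and release it. If a reference on the netns can still
 * be taken for the actions, freeing is deferred through RCU and the
 * workqueue so it can run under RTNL; otherwise the filter is freed
 * immediately.
 */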
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        cls_bpf_stop_offload(tp, prog);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        if (tcf_exts_get_net(&prog->exts))
                call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
        else
                __cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        __cls_bpf_delete(tp, arg);
        *last = list_empty(&head->plist);
        return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link)
                __cls_bpf_delete(tp, prog);

        kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle)
                        return prog;
        }

        return NULL;
}

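/* Classic BPF case: copy the sock_filter ops supplied over netlink,
 * validate their length and build a bpf_prog from them. The ops are
 * kept so they can be dumped back to user space later.
 */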
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

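/* eBPF case: take a reference on an already loaded BPF_PROG_TYPE_SCHED_CLS
 * program by file descriptor and optionally remember its user-visible
 * name. Programs that need a dst keep the device's dst around, except on
 * ingress.
 */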
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
                netif_keep_dst(qdisc_dev(tp->q));

        return 0;
}

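/* Parse and apply the netlink attributes for a new or replaced filter:
 * exactly one of classic ops or an eBPF fd must be given, flags are
 * validated, the program is set up and an optional classid is bound.
 */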
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
                             struct cls_bpf_prog *prog, unsigned long base,
                             struct nlattr **tb, struct nlattr *est, bool ovr)
{
        bool is_bpf, is_ebpf, have_exts = false;
        u32 gen_flags = 0;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
                        return -EINVAL;

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }
        if (tb[TCA_BPF_FLAGS_GEN]) {
                gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
                if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
                    !tc_flags_valid(gen_flags))
                        return -EINVAL;
        }

        prog->exts_integrated = have_exts;
        prog->gen_flags = gen_flags;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        return 0;
}

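/* Pick an unused 32-bit handle when the user did not specify one,
 * scanning from the last generated value and giving up (returning 0)
 * if no free handle can be found.
 */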
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

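/* Create or replace a filter instance: parse the options, allocate and
 * set up the new program, try to offload it, and finally publish it in
 * the RCU list, replacing the old program if there was one.
 */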
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          void **arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
                               NULL);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        ret = cls_bpf_offload(tp, prog, oldprog);
        if (ret)
                goto errout_parms;

        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                tcf_exts_get_net(&oldprog->exts);
                call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = prog;
        return 0;

errout_parms:
        cls_bpf_free_parms(prog);
errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

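/* Dump one filter back to user space, including classid, the program
 * (classic ops, or name/id/tag for eBPF), flags and action stats;
 * hardware counters are refreshed first for offloaded filters.
 */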
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        cls_bpf_offload_update_stats(tp, prog);

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;
        if (prog->gen_flags &&
            nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
        struct cls_bpf_prog *prog = fh;

        if (prog && prog->res.classid == classid)
                prog->res.class = cl;
}

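/* Iterate over all filters for dumping, honouring the walker's
 * skip/count bookkeeping and stopping when the callback asks for it.
 */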
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
        .bind_class     =       cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);