tools/perf/util/bpf-loader.c (Linux-libre 5.3.12-gnu, librecmc/linux-libre.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bpf-loader.c
4  *
5  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6  * Copyright (C) 2015 Huawei Inc.
7  */
8
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/err.h>
13 #include <linux/kernel.h>
14 #include <linux/string.h>
15 #include <linux/zalloc.h>
16 #include <errno.h>
17 #include "perf.h"
18 #include "debug.h"
19 #include "evlist.h"
20 #include "bpf-loader.h"
21 #include "bpf-prologue.h"
22 #include "probe-event.h"
23 #include "probe-finder.h" // for MAX_PROBES
24 #include "parse-events.h"
25 #include "strfilter.h"
26 #include "llvm-utils.h"
27 #include "c++/clang-c.h"
28
29 static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
30                               const char *fmt, va_list args)
31 {
32         return veprintf(1, verbose, pr_fmt(fmt), args);
33 }
34
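/*
 * Per-program private data attached via bpf_program__set_priv().
 * Tracepoint programs only use sys_name/evt_name; (k|u)probe programs
 * keep the parsed perf_probe_event plus the prologue bookkeeping
 * (insns_buf, nr_types, type_mapping) here.
 */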
35 struct bpf_prog_priv {
36         bool is_tp;
37         char *sys_name;
38         char *evt_name;
39         struct perf_probe_event pev;
40         bool need_prologue;
41         struct bpf_insn *insns_buf;
42         int nr_types;
43         int *type_mapping;
44 };
45
46 static bool libbpf_initialized;
47
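/*
 * Open a BPF object from an in-memory ELF buffer (e.g. the result of
 * compiling a .c scriptlet) without loading it into the kernel yet.
 */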
48 struct bpf_object *
49 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
50 {
51         struct bpf_object *obj;
52
53         if (!libbpf_initialized) {
54                 libbpf_set_print(libbpf_perf_print);
55                 libbpf_initialized = true;
56         }
57
58         obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
59         if (IS_ERR_OR_NULL(obj)) {
60                 pr_debug("bpf: failed to load buffer\n");
61                 return ERR_PTR(-EINVAL);
62         }
63
64         return obj;
65 }
66
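/*
 * Open a BPF object from a file. If 'source' is true the file is C
 * source: try the builtin clang first, fall back to the external
 * LLVM/clang helper, then open the resulting object buffer.
 */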
67 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
68 {
69         struct bpf_object *obj;
70
71         if (!libbpf_initialized) {
72                 libbpf_set_print(libbpf_perf_print);
73                 libbpf_initialized = true;
74         }
75
76         if (source) {
77                 int err;
78                 void *obj_buf;
79                 size_t obj_buf_sz;
80
81                 perf_clang__init();
82                 err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
83                 perf_clang__cleanup();
84                 if (err) {
85                         pr_debug("bpf: builtin compilation failed: %d, trying external compiler\n", err);
86                         err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
87                         if (err)
88                                 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
89                 } else
90                         pr_debug("bpf: successful builtin compilation\n");
91                 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
92
93                 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
94                         llvm__dump_obj(filename, obj_buf, obj_buf_sz);
95
96                 free(obj_buf);
97         } else
98                 obj = bpf_object__open(filename);
99
100         if (IS_ERR_OR_NULL(obj)) {
101                 pr_debug("bpf: failed to load %s\n", filename);
102                 return obj;
103         }
104
105         return obj;
106 }
107
108 void bpf__clear(void)
109 {
110         struct bpf_object *obj, *tmp;
111
112         bpf_object__for_each_safe(obj, tmp) {
113                 bpf__unprobe(obj);
114                 bpf_object__close(obj);
115         }
116 }
117
118 static void
119 clear_prog_priv(struct bpf_program *prog __maybe_unused,
120                 void *_priv)
121 {
122         struct bpf_prog_priv *priv = _priv;
123
124         cleanup_perf_probe_events(&priv->pev, 1);
125         zfree(&priv->insns_buf);
126         zfree(&priv->type_mapping);
127         zfree(&priv->sys_name);
128         zfree(&priv->evt_name);
129         free(priv);
130 }
131
132 static int
133 prog_config__exec(const char *value, struct perf_probe_event *pev)
134 {
135         pev->uprobes = true;
136         pev->target = strdup(value);
137         if (!pev->target)
138                 return -ENOMEM;
139         return 0;
140 }
141
142 static int
143 prog_config__module(const char *value, struct perf_probe_event *pev)
144 {
145         pev->uprobes = false;
146         pev->target = strdup(value);
147         if (!pev->target)
148                 return -ENOMEM;
149         return 0;
150 }
151
152 static int
153 prog_config__bool(const char *value, bool *pbool, bool invert)
154 {
155         int err;
156         bool bool_value;
157
158         if (!pbool)
159                 return -EINVAL;
160
161         err = strtobool(value, &bool_value);
162         if (err)
163                 return err;
164
165         *pbool = invert ? !bool_value : bool_value;
166         return 0;
167 }
168
169 static int
170 prog_config__inlines(const char *value,
171                      struct perf_probe_event *pev __maybe_unused)
172 {
173         return prog_config__bool(value, &probe_conf.no_inlines, true);
174 }
175
176 static int
177 prog_config__force(const char *value,
178                    struct perf_probe_event *pev __maybe_unused)
179 {
180         return prog_config__bool(value, &probe_conf.force_add, false);
181 }
182
183 static struct {
184         const char *key;
185         const char *usage;
186         const char *desc;
187         int (*func)(const char *, struct perf_probe_event *);
188 } bpf_prog_config_terms[] = {
189         {
190                 .key    = "exec",
191                 .usage  = "exec=<full path of file>",
192                 .desc   = "Set uprobe target",
193                 .func   = prog_config__exec,
194         },
195         {
196                 .key    = "module",
197                 .usage  = "module=<module name>    ",
198                 .desc   = "Set kprobe module",
199                 .func   = prog_config__module,
200         },
201         {
202                 .key    = "inlines",
203                 .usage  = "inlines=[yes|no]        ",
204                 .desc   = "Probe at inline symbol",
205                 .func   = prog_config__inlines,
206         },
207         {
208                 .key    = "force",
209                 .usage  = "force=[yes|no]          ",
210                 .desc   = "Forcibly add events with existing name",
211                 .func   = prog_config__force,
212         },
213 };
214
215 static int
216 do_prog_config(const char *key, const char *value,
217                struct perf_probe_event *pev)
218 {
219         unsigned int i;
220
221         pr_debug("config bpf program: %s=%s\n", key, value);
222         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
223                 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
224                         return bpf_prog_config_terms[i].func(value, pev);
225
226         pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
227                  key, value);
228
229         pr_debug("\nHint: Valid options are:\n");
230         for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
231                 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
232                          bpf_prog_config_terms[i].desc);
233         pr_debug("\n");
234
235         return -BPF_LOADER_ERRNO__PROGCONF_TERM;
236 }
237
238 static const char *
239 parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
240 {
241         char *text = strdup(config_str);
242         char *sep, *line;
243         const char *main_str = NULL;
244         int err = 0;
245
246         if (!text) {
247                 pr_debug("Not enough memory: dup config_str failed\n");
248                 return ERR_PTR(-ENOMEM);
249         }
250
251         line = text;
252         while ((sep = strchr(line, ';'))) {
253                 char *equ;
254
255                 *sep = '\0';
256                 equ = strchr(line, '=');
257                 if (!equ) {
258                         pr_warning("WARNING: invalid config in BPF object: %s\n",
259                                    line);
260                         pr_warning("\tShould be 'key=value'.\n");
261                         goto nextline;
262                 }
263                 *equ = '\0';
264
265                 err = do_prog_config(line, equ + 1, pev);
266                 if (err)
267                         break;
268 nextline:
269                 line = sep + 1;
270         }
271
272         if (!err)
273                 main_str = config_str + (line - text);
274         free(text);
275
276         return err ? ERR_PTR(err) : main_str;
277 }
278
279 static int
280 parse_prog_config(const char *config_str, const char **p_main_str,
281                   bool *is_tp, struct perf_probe_event *pev)
282 {
283         int err;
284         const char *main_str = parse_prog_config_kvpair(config_str, pev);
285
286         if (IS_ERR(main_str))
287                 return PTR_ERR(main_str);
288
289         *p_main_str = main_str;
290         if (!strchr(main_str, '=')) {
291                 /* Is a tracepoint event? */
292                 const char *s = strchr(main_str, ':');
293
294                 if (!s) {
295                         pr_debug("bpf: '%s' is not a valid tracepoint\n",
296                                  config_str);
297                         return -BPF_LOADER_ERRNO__CONFIG;
298                 }
299
300                 *is_tp = true;
301                 return 0;
302         }
303
304         *is_tp = false;
305         err = parse_perf_probe_command(main_str, pev);
306         if (err < 0) {
307                 pr_debug("bpf: '%s' is not a valid config string\n",
308                          config_str);
309                 /* parsing failed, no need to clear pev. */
310                 return -BPF_LOADER_ERRNO__CONFIG;
311         }
312         return 0;
313 }
314
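/*
 * Parse the program's config string (its ELF section name) and attach
 * the resulting bpf_prog_priv: either a tracepoint (sys:name) or a
 * parsed perf probe event in group PERF_BPF_PROBE_GROUP.
 */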
315 static int
316 config_bpf_program(struct bpf_program *prog)
317 {
318         struct perf_probe_event *pev = NULL;
319         struct bpf_prog_priv *priv = NULL;
320         const char *config_str, *main_str;
321         bool is_tp = false;
322         int err;
323
324         /* Initialize per-program probing setting */
325         probe_conf.no_inlines = false;
326         probe_conf.force_add = false;
327
328         config_str = bpf_program__title(prog, false);
329         if (IS_ERR(config_str)) {
330                 pr_debug("bpf: unable to get title for program\n");
331                 return PTR_ERR(config_str);
332         }
333
334         priv = calloc(sizeof(*priv), 1);
335         if (!priv) {
336                 pr_debug("bpf: failed to alloc priv\n");
337                 return -ENOMEM;
338         }
339         pev = &priv->pev;
340
341         pr_debug("bpf: config program '%s'\n", config_str);
342         err = parse_prog_config(config_str, &main_str, &is_tp, pev);
343         if (err)
344                 goto errout;
345
346         if (is_tp) {
347                 char *s = strchr(main_str, ':');
348
349                 priv->is_tp = true;
350                 priv->sys_name = strndup(main_str, s - main_str);
351                 priv->evt_name = strdup(s + 1);
352                 goto set_priv;
353         }
354
355         if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
356                 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
357                          config_str, PERF_BPF_PROBE_GROUP);
358                 err = -BPF_LOADER_ERRNO__GROUP;
359                 goto errout;
360         } else if (!pev->group)
361                 pev->group = strdup(PERF_BPF_PROBE_GROUP);
362
363         if (!pev->group) {
364                 pr_debug("bpf: strdup failed\n");
365                 err = -ENOMEM;
366                 goto errout;
367         }
368
369         if (!pev->event) {
370                 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
371                          config_str);
372                 err = -BPF_LOADER_ERRNO__EVENTNAME;
373                 goto errout;
374         }
375         pr_debug("bpf: config '%s' is ok\n", config_str);
376
377 set_priv:
378         err = bpf_program__set_priv(prog, priv, clear_prog_priv);
379         if (err) {
380                 pr_debug("Failed to set priv for program '%s'\n", config_str);
381                 goto errout;
382         }
383
384         return 0;
385
386 errout:
387         if (pev)
388                 clear_perf_probe_event(pev);
389         free(priv);
390         return err;
391 }
392
393 static int bpf__prepare_probe(void)
394 {
395         static int err = 0;
396         static bool initialized = false;
397
398         /*
399          * Make err static, so if init failed the first time, bpf__prepare_probe()
400          * fails each time without calling init_probe_symbol_maps() multiple
401          * times.
402          */
403         if (initialized)
404                 return err;
405
406         initialized = true;
407         err = init_probe_symbol_maps(false);
408         if (err < 0)
409                 pr_debug("Failed to init_probe_symbol_maps\n");
410         probe_conf.max_probes = MAX_PROBES;
411         return err;
412 }
413
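/*
 * bpf_program__set_prep() callback: for prologue type 'n', generate the
 * argument-fetching prologue for a matching tev into priv->insns_buf and
 * prepend it to the original instructions.
 */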
414 static int
415 preproc_gen_prologue(struct bpf_program *prog, int n,
416                      struct bpf_insn *orig_insns, int orig_insns_cnt,
417                      struct bpf_prog_prep_result *res)
418 {
419         struct bpf_prog_priv *priv = bpf_program__priv(prog);
420         struct probe_trace_event *tev;
421         struct perf_probe_event *pev;
422         struct bpf_insn *buf;
423         size_t prologue_cnt = 0;
424         int i, err;
425
426         if (IS_ERR(priv) || !priv || priv->is_tp)
427                 goto errout;
428
429         pev = &priv->pev;
430
431         if (n < 0 || n >= priv->nr_types)
432                 goto errout;
433
434         /* Find a tev that belongs to that type */
435         for (i = 0; i < pev->ntevs; i++) {
436                 if (priv->type_mapping[i] == n)
437                         break;
438         }
439
440         if (i >= pev->ntevs) {
441                 pr_debug("Internal error: prologue type %d not found\n", n);
442                 return -BPF_LOADER_ERRNO__PROLOGUE;
443         }
444
445         tev = &pev->tevs[i];
446
447         buf = priv->insns_buf;
448         err = bpf__gen_prologue(tev->args, tev->nargs,
449                                 buf, &prologue_cnt,
450                                 BPF_MAXINSNS - orig_insns_cnt);
451         if (err) {
452                 const char *title;
453
454                 title = bpf_program__title(prog, false);
455                 if (!title)
456                         title = "[unknown]";
457
458                 pr_debug("Failed to generate prologue for program %s\n",
459                          title);
460                 return err;
461         }
462
463         memcpy(&buf[prologue_cnt], orig_insns,
464                sizeof(struct bpf_insn) * orig_insns_cnt);
465
466         res->new_insn_ptr = buf;
467         res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
468         res->pfd = NULL;
469         return 0;
470
471 errout:
472         pr_debug("Internal error in preproc_gen_prologue\n");
473         return -BPF_LOADER_ERRNO__PROLOGUE;
474 }
475
476 /*
477  * compare_tev_args is reflexive, transitive and antisymmetric.
478  * I can prove it, but this margin is too narrow to contain the proof.
479  */
480 static int compare_tev_args(const void *ptev1, const void *ptev2)
481 {
482         int i, ret;
483         const struct probe_trace_event *tev1 =
484                 *(const struct probe_trace_event **)ptev1;
485         const struct probe_trace_event *tev2 =
486                 *(const struct probe_trace_event **)ptev2;
487
488         ret = tev2->nargs - tev1->nargs;
489         if (ret)
490                 return ret;
491
492         for (i = 0; i < tev1->nargs; i++) {
493                 struct probe_trace_arg *arg1, *arg2;
494                 struct probe_trace_arg_ref *ref1, *ref2;
495
496                 arg1 = &tev1->args[i];
497                 arg2 = &tev2->args[i];
498
499                 ret = strcmp(arg1->value, arg2->value);
500                 if (ret)
501                         return ret;
502
503                 ref1 = arg1->ref;
504                 ref2 = arg2->ref;
505
506                 while (ref1 && ref2) {
507                         ret = ref2->offset - ref1->offset;
508                         if (ret)
509                                 return ret;
510
511                         ref1 = ref1->next;
512                         ref2 = ref2->next;
513                 }
514
515                 if (ref1 || ref2)
516                         return ref2 ? 1 : -1;
517         }
518
519         return 0;
520 }
521
522 /*
523  * Assign a type number to each tev in a pev.
524  * mapping is an array with the same number of slots as tevs in that pev.
525  * nr_types will be set to the number of distinct types.
526  */
527 static int map_prologue(struct perf_probe_event *pev, int *mapping,
528                         int *nr_types)
529 {
530         int i, type = 0;
531         struct probe_trace_event **ptevs;
532
533         size_t array_sz = sizeof(*ptevs) * pev->ntevs;
534
535         ptevs = malloc(array_sz);
536         if (!ptevs) {
537                 pr_debug("Not enough memory: alloc ptevs failed\n");
538                 return -ENOMEM;
539         }
540
541         pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
542         for (i = 0; i < pev->ntevs; i++)
543                 ptevs[i] = &pev->tevs[i];
544
545         qsort(ptevs, pev->ntevs, sizeof(*ptevs),
546               compare_tev_args);
547
548         for (i = 0; i < pev->ntevs; i++) {
549                 int n;
550
551                 n = ptevs[i] - pev->tevs;
552                 if (i == 0) {
553                         mapping[n] = type;
554                         pr_debug("mapping[%d]=%d\n", n, type);
555                         continue;
556                 }
557
558                 if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
559                         mapping[n] = type;
560                 else
561                         mapping[n] = ++type;
562
563                 pr_debug("mapping[%d]=%d\n", n, mapping[n]);
564         }
565         free(ptevs);
566         *nr_types = type + 1;
567
568         return 0;
569 }
570
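/*
 * Decide whether a program needs a prologue (any tev with arguments),
 * allocate the instruction buffer and type mapping, and register
 * preproc_gen_prologue() as the program's pre-processor.
 */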
571 static int hook_load_preprocessor(struct bpf_program *prog)
572 {
573         struct bpf_prog_priv *priv = bpf_program__priv(prog);
574         struct perf_probe_event *pev;
575         bool need_prologue = false;
576         int err, i;
577
578         if (IS_ERR(priv) || !priv) {
579                 pr_debug("Internal error when hooking preprocessor\n");
580                 return -BPF_LOADER_ERRNO__INTERNAL;
581         }
582
583         if (priv->is_tp) {
584                 priv->need_prologue = false;
585                 return 0;
586         }
587
588         pev = &priv->pev;
589         for (i = 0; i < pev->ntevs; i++) {
590                 struct probe_trace_event *tev = &pev->tevs[i];
591
592                 if (tev->nargs > 0) {
593                         need_prologue = true;
594                         break;
595                 }
596         }
597
598         /*
599          * Since none of the tevs have arguments, we don't need to generate
600          * a prologue.
601          */
602         if (!need_prologue) {
603                 priv->need_prologue = false;
604                 return 0;
605         }
606
607         priv->need_prologue = true;
608         priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
609         if (!priv->insns_buf) {
610                 pr_debug("Not enough memory: alloc insns_buf failed\n");
611                 return -ENOMEM;
612         }
613
614         priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
615         if (!priv->type_mapping) {
616                 pr_debug("Not enough memory: alloc type_mapping failed\n");
617                 return -ENOMEM;
618         }
619         memset(priv->type_mapping, -1,
620                sizeof(int) * pev->ntevs);
621
622         err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
623         if (err)
624                 return err;
625
626         err = bpf_program__set_prep(prog, priv->nr_types,
627                                     preproc_gen_prologue);
628         return err;
629 }
630
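/*
 * For each program in the object: parse its config, then either mark it
 * as a tracepoint or create the corresponding kprobe/uprobe events and
 * hook the prologue pre-processor.
 */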
631 int bpf__probe(struct bpf_object *obj)
632 {
633         int err = 0;
634         struct bpf_program *prog;
635         struct bpf_prog_priv *priv;
636         struct perf_probe_event *pev;
637
638         err = bpf__prepare_probe();
639         if (err) {
640                 pr_debug("bpf__prepare_probe failed\n");
641                 return err;
642         }
643
644         bpf_object__for_each_program(prog, obj) {
645                 err = config_bpf_program(prog);
646                 if (err)
647                         goto out;
648
649                 priv = bpf_program__priv(prog);
650                 if (IS_ERR(priv) || !priv) {
651                         err = PTR_ERR(priv);
652                         goto out;
653                 }
654
655                 if (priv->is_tp) {
656                         bpf_program__set_tracepoint(prog);
657                         continue;
658                 }
659
660                 bpf_program__set_kprobe(prog);
661                 pev = &priv->pev;
662
663                 err = convert_perf_probe_events(pev, 1);
664                 if (err < 0) {
665                         pr_debug("bpf_probe: failed to convert perf probe events\n");
666                         goto out;
667                 }
668
669                 err = apply_perf_probe_events(pev, 1);
670                 if (err < 0) {
671                         pr_debug("bpf_probe: failed to apply perf probe events\n");
672                         goto out;
673                 }
674
675                 /*
676                  * After probing, consider the prologue, which
677                  * adds an argument fetcher to BPF programs.
678                  *
679                  * hook_load_preprocessor() hooks a pre-processor
680                  * onto the bpf_program and lets it generate the prologue
681                  * dynamically during loading.
682                  */
683                 err = hook_load_preprocessor(prog);
684                 if (err)
685                         goto out;
686         }
687 out:
688         return err < 0 ? err : 0;
689 }
690
691 #define EVENTS_WRITE_BUFSIZE  4096
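/*
 * Remove all probe events that bpf__probe() created for this object,
 * using "group:event" filters. Tracepoint programs are skipped.
 */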
692 int bpf__unprobe(struct bpf_object *obj)
693 {
694         int err, ret = 0;
695         struct bpf_program *prog;
696
697         bpf_object__for_each_program(prog, obj) {
698                 struct bpf_prog_priv *priv = bpf_program__priv(prog);
699                 int i;
700
701                 if (IS_ERR(priv) || !priv || priv->is_tp)
702                         continue;
703
704                 for (i = 0; i < priv->pev.ntevs; i++) {
705                         struct probe_trace_event *tev = &priv->pev.tevs[i];
706                         char name_buf[EVENTS_WRITE_BUFSIZE];
707                         struct strfilter *delfilter;
708
709                         snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
710                                  "%s:%s", tev->group, tev->event);
711                         name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
712
713                         delfilter = strfilter__new(name_buf, NULL);
714                         if (!delfilter) {
715                                 pr_debug("Failed to create filter for unprobing\n");
716                                 ret = -ENOMEM;
717                                 continue;
718                         }
719
720                         err = del_perf_probe_events(delfilter);
721                         strfilter__delete(delfilter);
722                         if (err) {
723                                 pr_debug("Failed to delete %s\n", name_buf);
724                                 ret = err;
725                                 continue;
726                         }
727                 }
728         }
729         return ret;
730 }
731
732 int bpf__load(struct bpf_object *obj)
733 {
734         int err;
735
736         err = bpf_object__load(obj);
737         if (err) {
738                 char bf[128];
739                 libbpf_strerror(err, bf, sizeof(bf));
740                 pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
741                 return err;
742         }
743         return 0;
744 }
745
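/*
 * Call 'func' once per (group, event, fd) attachment point in the
 * object: once per tracepoint program, and once per tev for probe
 * programs (picking the per-type fd when a prologue is used).
 */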
746 int bpf__foreach_event(struct bpf_object *obj,
747                        bpf_prog_iter_callback_t func,
748                        void *arg)
749 {
750         struct bpf_program *prog;
751         int err;
752
753         bpf_object__for_each_program(prog, obj) {
754                 struct bpf_prog_priv *priv = bpf_program__priv(prog);
755                 struct probe_trace_event *tev;
756                 struct perf_probe_event *pev;
757                 int i, fd;
758
759                 if (IS_ERR(priv) || !priv) {
760                         pr_debug("bpf: failed to get private field\n");
761                         return -BPF_LOADER_ERRNO__INTERNAL;
762                 }
763
764                 if (priv->is_tp) {
765                         fd = bpf_program__fd(prog);
766                         err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
767                         if (err) {
768                                 pr_debug("bpf: tracepoint callback failed, stop iterating\n");
769                                 return err;
770                         }
771                         continue;
772                 }
773
774                 pev = &priv->pev;
775                 for (i = 0; i < pev->ntevs; i++) {
776                         tev = &pev->tevs[i];
777
778                         if (priv->need_prologue) {
779                                 int type = priv->type_mapping[i];
780
781                                 fd = bpf_program__nth_fd(prog, type);
782                         } else {
783                                 fd = bpf_program__fd(prog);
784                         }
785
786                         if (fd < 0) {
787                                 pr_debug("bpf: failed to get file descriptor\n");
788                                 return fd;
789                         }
790
791                         err = (*func)(tev->group, tev->event, fd, arg);
792                         if (err) {
793                                 pr_debug("bpf: callback failed, stop iterating\n");
794                                 return err;
795                         }
796                 }
797         }
798         return 0;
799 }
800
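/*
 * Map configuration: "map:<name>.<option>" terms are recorded as
 * bpf_map_op entries on the map's private data and applied to the
 * kernel map later by bpf__apply_obj_config().
 */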
801 enum bpf_map_op_type {
802         BPF_MAP_OP_SET_VALUE,
803         BPF_MAP_OP_SET_EVSEL,
804 };
805
806 enum bpf_map_key_type {
807         BPF_MAP_KEY_ALL,
808         BPF_MAP_KEY_RANGES,
809 };
810
811 struct bpf_map_op {
812         struct list_head list;
813         enum bpf_map_op_type op_type;
814         enum bpf_map_key_type key_type;
815         union {
816                 struct parse_events_array array;
817         } k;
818         union {
819                 u64 value;
820                 struct perf_evsel *evsel;
821         } v;
822 };
823
824 struct bpf_map_priv {
825         struct list_head ops_list;
826 };
827
828 static void
829 bpf_map_op__delete(struct bpf_map_op *op)
830 {
831         if (!list_empty(&op->list))
832                 list_del_init(&op->list);
833         if (op->key_type == BPF_MAP_KEY_RANGES)
834                 parse_events__clear_array(&op->k.array);
835         free(op);
836 }
837
838 static void
839 bpf_map_priv__purge(struct bpf_map_priv *priv)
840 {
841         struct bpf_map_op *pos, *n;
842
843         list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
844                 list_del_init(&pos->list);
845                 bpf_map_op__delete(pos);
846         }
847 }
848
849 static void
850 bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
851                     void *_priv)
852 {
853         struct bpf_map_priv *priv = _priv;
854
855         bpf_map_priv__purge(priv);
856         free(priv);
857 }
858
859 static int
860 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
861 {
862         op->key_type = BPF_MAP_KEY_ALL;
863         if (!term)
864                 return 0;
865
866         if (term->array.nr_ranges) {
867                 size_t memsz = term->array.nr_ranges *
868                                 sizeof(op->k.array.ranges[0]);
869
870                 op->k.array.ranges = memdup(term->array.ranges, memsz);
871                 if (!op->k.array.ranges) {
872                         pr_debug("Not enough memory to alloc indices for map\n");
873                         return -ENOMEM;
874                 }
875                 op->key_type = BPF_MAP_KEY_RANGES;
876                 op->k.array.nr_ranges = term->array.nr_ranges;
877         }
878         return 0;
879 }
880
881 static struct bpf_map_op *
882 bpf_map_op__new(struct parse_events_term *term)
883 {
884         struct bpf_map_op *op;
885         int err;
886
887         op = zalloc(sizeof(*op));
888         if (!op) {
889                 pr_debug("Failed to alloc bpf_map_op\n");
890                 return ERR_PTR(-ENOMEM);
891         }
892         INIT_LIST_HEAD(&op->list);
893
894         err = bpf_map_op_setkey(op, term);
895         if (err) {
896                 free(op);
897                 return ERR_PTR(err);
898         }
899         return op;
900 }
901
902 static struct bpf_map_op *
903 bpf_map_op__clone(struct bpf_map_op *op)
904 {
905         struct bpf_map_op *newop;
906
907         newop = memdup(op, sizeof(*op));
908         if (!newop) {
909                 pr_debug("Failed to alloc bpf_map_op\n");
910                 return NULL;
911         }
912
913         INIT_LIST_HEAD(&newop->list);
914         if (op->key_type == BPF_MAP_KEY_RANGES) {
915                 size_t memsz = op->k.array.nr_ranges *
916                                sizeof(op->k.array.ranges[0]);
917
918                 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
919                 if (!newop->k.array.ranges) {
920                         pr_debug("Failed to alloc indices for map\n");
921                         free(newop);
922                         return NULL;
923                 }
924         }
925
926         return newop;
927 }
928
929 static struct bpf_map_priv *
930 bpf_map_priv__clone(struct bpf_map_priv *priv)
931 {
932         struct bpf_map_priv *newpriv;
933         struct bpf_map_op *pos, *newop;
934
935         newpriv = zalloc(sizeof(*newpriv));
936         if (!newpriv) {
937                 pr_debug("Not enough memory to alloc map private\n");
938                 return NULL;
939         }
940         INIT_LIST_HEAD(&newpriv->ops_list);
941
942         list_for_each_entry(pos, &priv->ops_list, list) {
943                 newop = bpf_map_op__clone(pos);
944                 if (!newop) {
945                         bpf_map_priv__purge(newpriv);
946                         return NULL;
947                 }
948                 list_add_tail(&newop->list, &newpriv->ops_list);
949         }
950
951         return newpriv;
952 }
953
954 static int
955 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
956 {
957         const char *map_name = bpf_map__name(map);
958         struct bpf_map_priv *priv = bpf_map__priv(map);
959
960         if (IS_ERR(priv)) {
961                 pr_debug("Failed to get private from map %s\n", map_name);
962                 return PTR_ERR(priv);
963         }
964
965         if (!priv) {
966                 priv = zalloc(sizeof(*priv));
967                 if (!priv) {
968                         pr_debug("Not enough memory to alloc map private\n");
969                         return -ENOMEM;
970                 }
971                 INIT_LIST_HEAD(&priv->ops_list);
972
973                 if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
974                         free(priv);
975                         return -BPF_LOADER_ERRNO__INTERNAL;
976                 }
977         }
978
979         list_add_tail(&op->list, &priv->ops_list);
980         return 0;
981 }
982
983 static struct bpf_map_op *
984 bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
985 {
986         struct bpf_map_op *op;
987         int err;
988
989         op = bpf_map_op__new(term);
990         if (IS_ERR(op))
991                 return op;
992
993         err = bpf_map__add_op(map, op);
994         if (err) {
995                 bpf_map_op__delete(op);
996                 return ERR_PTR(err);
997         }
998         return op;
999 }
1000
1001 static int
1002 __bpf_map__config_value(struct bpf_map *map,
1003                         struct parse_events_term *term)
1004 {
1005         struct bpf_map_op *op;
1006         const char *map_name = bpf_map__name(map);
1007         const struct bpf_map_def *def = bpf_map__def(map);
1008
1009         if (IS_ERR(def)) {
1010                 pr_debug("Unable to get map definition from '%s'\n",
1011                          map_name);
1012                 return -BPF_LOADER_ERRNO__INTERNAL;
1013         }
1014
1015         if (def->type != BPF_MAP_TYPE_ARRAY) {
1016                 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1017                          map_name);
1018                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1019         }
1020         if (def->key_size < sizeof(unsigned int)) {
1021                 pr_debug("Map %s has incorrect key size\n", map_name);
1022                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1023         }
1024         switch (def->value_size) {
1025         case 1:
1026         case 2:
1027         case 4:
1028         case 8:
1029                 break;
1030         default:
1031                 pr_debug("Map %s has incorrect value size\n", map_name);
1032                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1033         }
1034
1035         op = bpf_map__add_newop(map, term);
1036         if (IS_ERR(op))
1037                 return PTR_ERR(op);
1038         op->op_type = BPF_MAP_OP_SET_VALUE;
1039         op->v.value = term->val.num;
1040         return 0;
1041 }
1042
1043 static int
1044 bpf_map__config_value(struct bpf_map *map,
1045                       struct parse_events_term *term,
1046                       struct perf_evlist *evlist __maybe_unused)
1047 {
1048         if (!term->err_val) {
1049                 pr_debug("Config value not set\n");
1050                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1051         }
1052
1053         if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1054                 pr_debug("ERROR: wrong value type for 'value'\n");
1055                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1056         }
1057
1058         return __bpf_map__config_value(map, term);
1059 }
1060
1061 static int
1062 __bpf_map__config_event(struct bpf_map *map,
1063                         struct parse_events_term *term,
1064                         struct perf_evlist *evlist)
1065 {
1066         struct perf_evsel *evsel;
1067         const struct bpf_map_def *def;
1068         struct bpf_map_op *op;
1069         const char *map_name = bpf_map__name(map);
1070
1071         evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
1072         if (!evsel) {
1073                 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1074                          map_name, term->val.str);
1075                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1076         }
1077
1078         def = bpf_map__def(map);
1079         if (IS_ERR(def)) {
1080                 pr_debug("Unable to get map definition from '%s'\n",
1081                          map_name);
1082                 return PTR_ERR(def);
1083         }
1084
1085         /*
1086          * No need to check key_size and value_size:
1087          * the kernel has already checked them.
1088          */
1089         if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1090                 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1091                          map_name);
1092                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1093         }
1094
1095         op = bpf_map__add_newop(map, term);
1096         if (IS_ERR(op))
1097                 return PTR_ERR(op);
1098         op->op_type = BPF_MAP_OP_SET_EVSEL;
1099         op->v.evsel = evsel;
1100         return 0;
1101 }
1102
1103 static int
1104 bpf_map__config_event(struct bpf_map *map,
1105                       struct parse_events_term *term,
1106                       struct perf_evlist *evlist)
1107 {
1108         if (!term->err_val) {
1109                 pr_debug("Config value not set\n");
1110                 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1111         }
1112
1113         if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1114                 pr_debug("ERROR: wrong value type for 'event'\n");
1115                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1116         }
1117
1118         return __bpf_map__config_event(map, term, evlist);
1119 }
1120
1121 struct bpf_obj_config__map_func {
1122         const char *config_opt;
1123         int (*config_func)(struct bpf_map *, struct parse_events_term *,
1124                            struct perf_evlist *);
1125 };
1126
1127 struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1128         {"value", bpf_map__config_value},
1129         {"event", bpf_map__config_event},
1130 };
1131
1132 static int
1133 config_map_indices_range_check(struct parse_events_term *term,
1134                                struct bpf_map *map,
1135                                const char *map_name)
1136 {
1137         struct parse_events_array *array = &term->array;
1138         const struct bpf_map_def *def;
1139         unsigned int i;
1140
1141         if (!array->nr_ranges)
1142                 return 0;
1143         if (!array->ranges) {
1144                 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1145                          map_name, (int)array->nr_ranges);
1146                 return -BPF_LOADER_ERRNO__INTERNAL;
1147         }
1148
1149         def = bpf_map__def(map);
1150         if (IS_ERR(def)) {
1151                 pr_debug("ERROR: Unable to get map definition from '%s'\n",
1152                          map_name);
1153                 return -BPF_LOADER_ERRNO__INTERNAL;
1154         }
1155
1156         for (i = 0; i < array->nr_ranges; i++) {
1157                 unsigned int start = array->ranges[i].start;
1158                 size_t length = array->ranges[i].length;
1159                 unsigned int idx = start + length - 1;
1160
1161                 if (idx >= def->max_entries) {
1162                         pr_debug("ERROR: index %d too large\n", idx);
1163                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1164                 }
1165         }
1166         return 0;
1167 }
1168
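/*
 * Handle one "map:<mapname>.<option>" term: look up the map, validate
 * any index ranges, and dispatch to the matching config function.
 * *key_scan_pos reports where in the config string an error occurred.
 */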
1169 static int
1170 bpf__obj_config_map(struct bpf_object *obj,
1171                     struct parse_events_term *term,
1172                     struct perf_evlist *evlist,
1173                     int *key_scan_pos)
1174 {
1175         /* key is "map:<mapname>.<config opt>" */
1176         char *map_name = strdup(term->config + sizeof("map:") - 1);
1177         struct bpf_map *map;
1178         int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1179         char *map_opt;
1180         size_t i;
1181
1182         if (!map_name)
1183                 return -ENOMEM;
1184
1185         map_opt = strchr(map_name, '.');
1186         if (!map_opt) {
1187                 pr_debug("ERROR: Invalid map config: %s\n", map_name);
1188                 goto out;
1189         }
1190
1191         *map_opt++ = '\0';
1192         if (*map_opt == '\0') {
1193                 pr_debug("ERROR: Invalid map option: %s\n", term->config);
1194                 goto out;
1195         }
1196
1197         map = bpf_object__find_map_by_name(obj, map_name);
1198         if (!map) {
1199                 pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1200                 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1201                 goto out;
1202         }
1203
1204         *key_scan_pos += strlen(map_opt);
1205         err = config_map_indices_range_check(term, map, map_name);
1206         if (err)
1207                 goto out;
1208         *key_scan_pos -= strlen(map_opt);
1209
1210         for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1211                 struct bpf_obj_config__map_func *func =
1212                                 &bpf_obj_config__map_funcs[i];
1213
1214                 if (strcmp(map_opt, func->config_opt) == 0) {
1215                         err = func->config_func(map, term, evlist);
1216                         goto out;
1217                 }
1218         }
1219
1220         pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1221         err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1222 out:
1223         if (!err)
1224                 *key_scan_pos += strlen(map_opt);
1225         free(map_name);
1226         return err;
1227 }
1228
1229 int bpf__config_obj(struct bpf_object *obj,
1230                     struct parse_events_term *term,
1231                     struct perf_evlist *evlist,
1232                     int *error_pos)
1233 {
1234         int key_scan_pos = 0;
1235         int err;
1236
1237         if (!obj || !term || !term->config)
1238                 return -EINVAL;
1239
1240         if (strstarts(term->config, "map:")) {
1241                 key_scan_pos = sizeof("map:") - 1;
1242                 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1243                 goto out;
1244         }
1245         err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1246 out:
1247         if (error_pos)
1248                 *error_pos = key_scan_pos;
1249         return err;
1250
1251 }
1252
1253 typedef int (*map_config_func_t)(const char *name, int map_fd,
1254                                  const struct bpf_map_def *pdef,
1255                                  struct bpf_map_op *op,
1256                                  void *pkey, void *arg);
1257
1258 static int
1259 foreach_key_array_all(map_config_func_t func,
1260                       void *arg, const char *name,
1261                       int map_fd, const struct bpf_map_def *pdef,
1262                       struct bpf_map_op *op)
1263 {
1264         unsigned int i;
1265         int err;
1266
1267         for (i = 0; i < pdef->max_entries; i++) {
1268                 err = func(name, map_fd, pdef, op, &i, arg);
1269                 if (err) {
1270                         pr_debug("ERROR: failed to insert value to %s[%u]\n",
1271                                  name, i);
1272                         return err;
1273                 }
1274         }
1275         return 0;
1276 }
1277
1278 static int
1279 foreach_key_array_ranges(map_config_func_t func, void *arg,
1280                          const char *name, int map_fd,
1281                          const struct bpf_map_def *pdef,
1282                          struct bpf_map_op *op)
1283 {
1284         unsigned int i, j;
1285         int err;
1286
1287         for (i = 0; i < op->k.array.nr_ranges; i++) {
1288                 unsigned int start = op->k.array.ranges[i].start;
1289                 size_t length = op->k.array.ranges[i].length;
1290
1291                 for (j = 0; j < length; j++) {
1292                         unsigned int idx = start + j;
1293
1294                         err = func(name, map_fd, pdef, op, &idx, arg);
1295                         if (err) {
1296                                 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1297                                          name, idx);
1298                                 return err;
1299                         }
1300                 }
1301         }
1302         return 0;
1303 }
1304
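/*
 * Walk the ops recorded on a map and invoke 'func' for every affected
 * key: all keys for BPF_MAP_KEY_ALL, or each index in the configured
 * ranges for BPF_MAP_KEY_RANGES.
 */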
1305 static int
1306 bpf_map_config_foreach_key(struct bpf_map *map,
1307                            map_config_func_t func,
1308                            void *arg)
1309 {
1310         int err, map_fd;
1311         struct bpf_map_op *op;
1312         const struct bpf_map_def *def;
1313         const char *name = bpf_map__name(map);
1314         struct bpf_map_priv *priv = bpf_map__priv(map);
1315
1316         if (IS_ERR(priv)) {
1317                 pr_debug("ERROR: failed to get private from map %s\n", name);
1318                 return -BPF_LOADER_ERRNO__INTERNAL;
1319         }
1320         if (!priv || list_empty(&priv->ops_list)) {
1321                 pr_debug("INFO: nothing to config for map %s\n", name);
1322                 return 0;
1323         }
1324
1325         def = bpf_map__def(map);
1326         if (IS_ERR(def)) {
1327                 pr_debug("ERROR: failed to get definition from map %s\n", name);
1328                 return -BPF_LOADER_ERRNO__INTERNAL;
1329         }
1330         map_fd = bpf_map__fd(map);
1331         if (map_fd < 0) {
1332                 pr_debug("ERROR: failed to get fd from map %s\n", name);
1333                 return map_fd;
1334         }
1335
1336         list_for_each_entry(op, &priv->ops_list, list) {
1337                 switch (def->type) {
1338                 case BPF_MAP_TYPE_ARRAY:
1339                 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1340                         switch (op->key_type) {
1341                         case BPF_MAP_KEY_ALL:
1342                                 err = foreach_key_array_all(func, arg, name,
1343                                                             map_fd, def, op);
1344                                 break;
1345                         case BPF_MAP_KEY_RANGES:
1346                                 err = foreach_key_array_ranges(func, arg, name,
1347                                                                map_fd, def,
1348                                                                op);
1349                                 break;
1350                         default:
1351                                 pr_debug("ERROR: keytype for map '%s' invalid\n",
1352                                          name);
1353                                 return -BPF_LOADER_ERRNO__INTERNAL;
1354                         }
1355                         if (err)
1356                                 return err;
1357                         break;
1358                 default:
1359                         pr_debug("ERROR: type of '%s' incorrect\n", name);
1360                         return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1361                 }
1362         }
1363
1364         return 0;
1365 }
1366
1367 static int
1368 apply_config_value_for_key(int map_fd, void *pkey,
1369                            size_t val_size, u64 val)
1370 {
1371         int err = 0;
1372
1373         switch (val_size) {
1374         case 1: {
1375                 u8 _val = (u8)(val);
1376                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1377                 break;
1378         }
1379         case 2: {
1380                 u16 _val = (u16)(val);
1381                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1382                 break;
1383         }
1384         case 4: {
1385                 u32 _val = (u32)(val);
1386                 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1387                 break;
1388         }
1389         case 8: {
1390                 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1391                 break;
1392         }
1393         default:
1394                 pr_debug("ERROR: invalid value size\n");
1395                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1396         }
1397         if (err && errno)
1398                 err = -errno;
1399         return err;
1400 }
1401
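/*
 * Store an event's perf fd into a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot,
 * after checking that the evsel has one fd per key, is not inherited,
 * and is a bpf-output, raw or hardware event.
 */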
1402 static int
1403 apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1404                            struct perf_evsel *evsel)
1405 {
1406         struct xyarray *xy = evsel->fd;
1407         struct perf_event_attr *attr;
1408         unsigned int key, events;
1409         bool check_pass = false;
1410         int *evt_fd;
1411         int err;
1412
1413         if (!xy) {
1414                 pr_debug("ERROR: evsel not ready for map %s\n", name);
1415                 return -BPF_LOADER_ERRNO__INTERNAL;
1416         }
1417
1418         if (xy->row_size / xy->entry_size != 1) {
1419                 pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1420                          name);
1421                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1422         }
1423
1424         attr = &evsel->attr;
1425         if (attr->inherit) {
1426                 pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1427                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1428         }
1429
1430         if (perf_evsel__is_bpf_output(evsel))
1431                 check_pass = true;
1432         if (attr->type == PERF_TYPE_RAW)
1433                 check_pass = true;
1434         if (attr->type == PERF_TYPE_HARDWARE)
1435                 check_pass = true;
1436         if (!check_pass) {
1437                 pr_debug("ERROR: Event type is wrong for map %s\n", name);
1438                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1439         }
1440
1441         events = xy->entries / (xy->row_size / xy->entry_size);
1442         key = *((unsigned int *)pkey);
1443         if (key >= events) {
1444                 pr_debug("ERROR: there is no event %d for map %s\n",
1445                          key, name);
1446                 return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1447         }
1448         evt_fd = xyarray__entry(xy, key, 0);
1449         err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1450         if (err && errno)
1451                 err = -errno;
1452         return err;
1453 }
1454
1455 static int
1456 apply_obj_config_map_for_key(const char *name, int map_fd,
1457                              const struct bpf_map_def *pdef,
1458                              struct bpf_map_op *op,
1459                              void *pkey, void *arg __maybe_unused)
1460 {
1461         int err;
1462
1463         switch (op->op_type) {
1464         case BPF_MAP_OP_SET_VALUE:
1465                 err = apply_config_value_for_key(map_fd, pkey,
1466                                                  pdef->value_size,
1467                                                  op->v.value);
1468                 break;
1469         case BPF_MAP_OP_SET_EVSEL:
1470                 err = apply_config_evsel_for_key(name, map_fd, pkey,
1471                                                  op->v.evsel);
1472                 break;
1473         default:
1474                 pr_debug("ERROR: unknown value type for '%s'\n", name);
1475                 err = -BPF_LOADER_ERRNO__INTERNAL;
1476         }
1477         return err;
1478 }
1479
1480 static int
1481 apply_obj_config_map(struct bpf_map *map)
1482 {
1483         return bpf_map_config_foreach_key(map,
1484                                           apply_obj_config_map_for_key,
1485                                           NULL);
1486 }
1487
1488 static int
1489 apply_obj_config_object(struct bpf_object *obj)
1490 {
1491         struct bpf_map *map;
1492         int err;
1493
1494         bpf_object__for_each_map(map, obj) {
1495                 err = apply_obj_config_map(map);
1496                 if (err)
1497                         return err;
1498         }
1499         return 0;
1500 }
1501
1502 int bpf__apply_obj_config(void)
1503 {
1504         struct bpf_object *obj, *tmp;
1505         int err;
1506
1507         bpf_object__for_each_safe(obj, tmp) {
1508                 err = apply_obj_config_object(obj);
1509                 if (err)
1510                         return err;
1511         }
1512
1513         return 0;
1514 }
1515
1516 #define bpf__for_each_map(pos, obj, objtmp)     \
1517         bpf_object__for_each_safe(obj, objtmp)  \
1518                 bpf_object__for_each_map(pos, obj)
1519
1520 #define bpf__for_each_map_named(pos, obj, objtmp, name) \
1521         bpf__for_each_map(pos, obj, objtmp)             \
1522                 if (bpf_map__name(pos) &&               \
1523                         (strcmp(name,                   \
1524                                 bpf_map__name(pos)) == 0))
1525
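/*
 * Make sure every map called 'name' in every loaded object has an evsel
 * to write to: reuse an existing map's ops as a template, or create a
 * new bpf-output event and attach it via a BPF_MAP_OP_SET_EVSEL op.
 */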
1526 struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name)
1527 {
1528         struct bpf_map_priv *tmpl_priv = NULL;
1529         struct bpf_object *obj, *tmp;
1530         struct perf_evsel *evsel = NULL;
1531         struct bpf_map *map;
1532         int err;
1533         bool need_init = false;
1534
1535         bpf__for_each_map_named(map, obj, tmp, name) {
1536                 struct bpf_map_priv *priv = bpf_map__priv(map);
1537
1538                 if (IS_ERR(priv))
1539                         return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1540
1541                 /*
1542                  * No need to check map type: type should have been
1543                  * verified by kernel.
1544                  */
1545                 if (!need_init && !priv)
1546                         need_init = !priv;
1547                 if (!tmpl_priv && priv)
1548                         tmpl_priv = priv;
1549         }
1550
1551         if (!need_init)
1552                 return NULL;
1553
1554         if (!tmpl_priv) {
1555                 char *event_definition = NULL;
1556
1557                 if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
1558                         return ERR_PTR(-ENOMEM);
1559
1560                 err = parse_events(evlist, event_definition, NULL);
1561                 free(event_definition);
1562
1563                 if (err) {
1564                         pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
1565                         return ERR_PTR(-err);
1566                 }
1567
1568                 evsel = perf_evlist__last(evlist);
1569         }
1570
1571         bpf__for_each_map_named(map, obj, tmp, name) {
1572                 struct bpf_map_priv *priv = bpf_map__priv(map);
1573
1574                 if (IS_ERR(priv))
1575                         return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1576                 if (priv)
1577                         continue;
1578
1579                 if (tmpl_priv) {
1580                         priv = bpf_map_priv__clone(tmpl_priv);
1581                         if (!priv)
1582                                 return ERR_PTR(-ENOMEM);
1583
1584                         err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
1585                         if (err) {
1586                                 bpf_map_priv__clear(map, priv);
1587                                 return ERR_PTR(err);
1588                         }
1589                 } else if (evsel) {
1590                         struct bpf_map_op *op;
1591
1592                         op = bpf_map__add_newop(map, NULL);
1593                         if (IS_ERR(op))
1594                                 return ERR_CAST(op);
1595                         op->op_type = BPF_MAP_OP_SET_EVSEL;
1596                         op->v.evsel = evsel;
1597                 }
1598         }
1599
1600         return evsel;
1601 }
1602
1603 int bpf__setup_stdout(struct perf_evlist *evlist)
1604 {
1605         struct perf_evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
1606         return PTR_ERR_OR_ZERO(evsel);
1607 }
1608
1609 #define ERRNO_OFFSET(e)         ((e) - __BPF_LOADER_ERRNO__START)
1610 #define ERRCODE_OFFSET(c)       ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1611 #define NR_ERRNO        (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1612
1613 static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1614         [ERRCODE_OFFSET(CONFIG)]        = "Invalid config string",
1615         [ERRCODE_OFFSET(GROUP)]         = "Invalid group name",
1616         [ERRCODE_OFFSET(EVENTNAME)]     = "No event name found in config string",
1617         [ERRCODE_OFFSET(INTERNAL)]      = "BPF loader internal error",
1618         [ERRCODE_OFFSET(COMPILE)]       = "Error when compiling BPF scriptlet",
1619         [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1620         [ERRCODE_OFFSET(PROLOGUE)]      = "Failed to generate prologue",
1621         [ERRCODE_OFFSET(PROLOGUE2BIG)]  = "Prologue too big for program",
1622         [ERRCODE_OFFSET(PROLOGUEOOB)]   = "Offset out of bounds for prologue",
1623         [ERRCODE_OFFSET(OBJCONF_OPT)]   = "Invalid object config option",
1624         [ERRCODE_OFFSET(OBJCONF_CONF)]  = "Config value not set (missing '=')",
1625         [ERRCODE_OFFSET(OBJCONF_MAP_OPT)]       = "Invalid object map config option",
1626         [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]  = "Target map doesn't exist",
1627         [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]     = "Incorrect value type for map",
1628         [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]      = "Incorrect map type",
1629         [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]   = "Incorrect map key size",
1630         [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1631         [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]     = "Event not found for map setting",
1632         [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]   = "Invalid map size for event setting",
1633         [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]    = "Event dimension too large",
1634         [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]    = "Inherited events are not supported",
1635         [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]   = "Wrong event type for map",
1636         [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]   = "Index too large",
1637 };
1638
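/*
 * Turn @err (sign ignored) into a message in @buf: libbpf errors are
 * delegated to libbpf_strerror(), bpf loader errors are looked up in
 * bpf_loader_strerror_table[], and anything else falls back to the
 * plain errno string from str_error_r().
 */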
1639 static int
1640 bpf_loader_strerror(int err, char *buf, size_t size)
1641 {
1642         char sbuf[STRERR_BUFSIZE];
1643         const char *msg;
1644
1645         if (!buf || !size)
1646                 return -1;
1647
1648         err = err > 0 ? err : -err;
1649
1650         if (err >= __LIBBPF_ERRNO__START)
1651                 return libbpf_strerror(err, buf, size);
1652
1653         if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1654                 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1655                 snprintf(buf, size, "%s", msg);
1656                 buf[size - 1] = '\0';
1657                 return 0;
1658         }
1659
1660         if (err >= __BPF_LOADER_ERRNO__END)
1661                 snprintf(buf, size, "Unknown bpf loader error %d", err);
1662         else
1663                 snprintf(buf, size, "%s",
1664                          str_error_r(err, sbuf, sizeof(sbuf)));
1665
1666         buf[size - 1] = '\0';
1667         return -1;
1668 }
1669
1670 #define bpf__strerror_head(err, buf, size) \
1671         char sbuf[STRERR_BUFSIZE], *emsg;\
1672         if (!size)\
1673                 return 0;\
1674         if (err < 0)\
1675                 err = -err;\
1676         bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
1677         emsg = sbuf;\
1678         switch (err) {\
1679         default:\
1680                 scnprintf(buf, size, "%s", emsg);\
1681                 break;
1682
1683 #define bpf__strerror_entry(val, fmt...)\
1684         case val: {\
1685                 scnprintf(buf, size, fmt);\
1686                 break;\
1687         }
1688
1689 #define bpf__strerror_end(buf, size)\
1690         }\
1691         buf[size - 1] = '\0';
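/*
 * The three macros above are meant to be used together.  A hypothetical
 *
 *	bpf__strerror_head(err, buf, size);
 *	bpf__strerror_entry(EACCES, "You need to be root");
 *	bpf__strerror_end(buf, size);
 *
 * expands to a switch (err) with a case for EACCES plus a default case
 * that copies the generic bpf_loader_strerror() message into @buf.
 */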
1692
1693 int bpf__strerror_prepare_load(const char *filename, bool source,
1694                                int err, char *buf, size_t size)
1695 {
1696         size_t n;
1697         int ret;
1698
1699         n = snprintf(buf, size, "Failed to load %s%s: ",
1700                          filename, source ? " from source" : "");
1701         if (n >= size) {
1702                 buf[size - 1] = '\0';
1703                 return 0;
1704         }
1705         buf += n;
1706         size -= n;
1707
1708         ret = bpf_loader_strerror(err, buf, size);
1709         buf[size - 1] = '\0';
1710         return ret;
1711 }
1712
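/*
 * Map the errnos commonly hit while creating probe points (EEXIST,
 * EACCES, EPERM, ENOENT) to actionable hints; everything else falls
 * through to the generic bpf_loader_strerror() text.
 */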
1713 int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
1714                         int err, char *buf, size_t size)
1715 {
1716         bpf__strerror_head(err, buf, size);
1717         case BPF_LOADER_ERRNO__PROGCONF_TERM: {
1718                 scnprintf(buf, size, "%s (add -v to see details)", emsg);
1719                 break;
1720         }
1721         bpf__strerror_entry(EEXIST, "Probe point already exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
1722         bpf__strerror_entry(EACCES, "You need to be root");
1723         bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
1724         bpf__strerror_entry(ENOENT, "You need to check the probing points in the BPF file");
1725         bpf__strerror_end(buf, size);
1726         return 0;
1727 }
1728
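/*
 * For LIBBPF_ERRNO__KVER, compare the 'version' recorded in the object
 * (bpf_object__kversion()) with the running kernel so the mismatch can
 * be reported explicitly; other codes use the generic message.
 */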
1729 int bpf__strerror_load(struct bpf_object *obj,
1730                        int err, char *buf, size_t size)
1731 {
1732         bpf__strerror_head(err, buf, size);
1733         case LIBBPF_ERRNO__KVER: {
1734                 unsigned int obj_kver = bpf_object__kversion(obj);
1735                 unsigned int real_kver;
1736
1737                 if (fetch_kernel_version(&real_kver, NULL, 0)) {
1738                         scnprintf(buf, size, "Unable to fetch kernel version");
1739                         break;
1740                 }
1741
1742                 if (obj_kver != real_kver) {
1743                         scnprintf(buf, size,
1744                                   "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
1745                                   KVER_PARAM(obj_kver),
1746                                   KVER_PARAM(real_kver));
1747                         break;
1748                 }
1749
1750                 scnprintf(buf, size, "Failed to load program for unknown reason");
1751                 break;
1752         }
1753         bpf__strerror_end(buf, size);
1754         return 0;
1755 }
1756
1757 int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
1758                              struct parse_events_term *term __maybe_unused,
1759                              struct perf_evlist *evlist __maybe_unused,
1760                              int *error_pos __maybe_unused, int err,
1761                              char *buf, size_t size)
1762 {
1763         bpf__strerror_head(err, buf, size);
1764         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
1765                             "Can't use this config term with this map type");
1766         bpf__strerror_end(buf, size);
1767         return 0;
1768 }
1769
1770 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
1771 {
1772         bpf__strerror_head(err, buf, size);
1773         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
1774                             "Cannot set event to BPF map in multi-thread tracing");
1775         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
1776                             "%s (Hint: use -i to turn off inherit)", emsg);
1777         bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
1778                             "Can only put raw, hardware, and BPF output events into a BPF map");
1779         bpf__strerror_end(buf, size);
1780         return 0;
1781 }
1782
1783 int bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused,
1784                                      int err, char *buf, size_t size)
1785 {
1786         bpf__strerror_head(err, buf, size);
1787         bpf__strerror_end(buf, size);
1788         return 0;
1789 }