Linux-libre 5.3.12-gnu
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

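/* Per-sample callback: check that the CPU id recorded in the sample payload
 * matches the CPU that produced the sample, and mark that CPU as seen.
 */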
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
        int cpu_data = *(int *)data, duration = 0;
        cpu_set_t *cpu_seen = ctx;

        if (cpu_data != cpu)
                CHECK(cpu_data != cpu, "check_cpu_data",
                      "cpu_data %d != cpu %d\n", cpu_data, cpu);

        CPU_SET(cpu, cpu_seen);
}

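/* End-to-end test: attach a kprobe to the nanosleep syscall, bounce the
 * thread across every possible CPU, and verify that a sample arrives from
 * each CPU through the shared perf buffer map.
 */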
void test_perf_buffer(void)
{
        int err, prog_fd, nr_cpus, i, duration = 0;
        const char *prog_name = "kprobe/sys_nanosleep";
        const char *file = "./test_perf_buffer.o";
        struct perf_buffer_opts pb_opts = {};
        struct bpf_map *perf_buf_map;
        cpu_set_t cpu_set, cpu_seen;
        struct bpf_program *prog;
        struct bpf_object *obj;
        struct perf_buffer *pb;
        struct bpf_link *link;

        nr_cpus = libbpf_num_possible_cpus();
        if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
                return;

        /* load program */
        err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
        if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
                return;

        prog = bpf_object__find_program_by_title(obj, prog_name);
        if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
                goto out_close;

        /* find perf buffer map */
        perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
        if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
                goto out_close;

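        /* SYS_NANOSLEEP_KPROBE_NAME (defined in test_progs.h) resolves to the
         * arch-specific symbol name of the nanosleep syscall entry point.
         */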
        /* attach kprobe */
        link = bpf_program__attach_kprobe(prog, false /* retprobe */,
                                          SYS_NANOSLEEP_KPROBE_NAME);
        if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
                goto out_close;

        /* set up perf buffer */
        pb_opts.sample_cb = on_sample;
        pb_opts.ctx = &cpu_seen;
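        /* second argument is the size of each per-CPU ring buffer, in pages */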
        pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
        if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
                goto out_detach;

        /* trigger kprobe on every CPU */
        CPU_ZERO(&cpu_seen);
        for (i = 0; i < nr_cpus; i++) {
                CPU_ZERO(&cpu_set);
                CPU_SET(i, &cpu_set);

                err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
                                             &cpu_set);
                if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
                                 i, err))
                        goto out_detach;

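                /* usleep() is implemented via nanosleep(), so this fires the
                 * attached kprobe on the current CPU
                 */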
                usleep(1);
        }

        /* read perf buffer */
        err = perf_buffer__poll(pb, 100);
        if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
                goto out_free_pb;

        if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt",
                  "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen)))
                goto out_free_pb;

out_free_pb:
        perf_buffer__free(pb);
out_detach:
        bpf_link__destroy(link);
out_close:
        bpf_object__close(obj);
}