// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_CNT_RAWTP   10ull
#define MAX_STACK_RAWTP 100
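/* Full sample layout written by test_get_stack_rawtp.o through the perf
 * event map; shorter samples carry only raw kernel stack addresses.
 */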
struct get_stack_trace_t {
        int pid;
        int kern_stack_size;
        int user_stack_size;
        int user_stack_buildid_size;
        __u64 kern_stack[MAX_STACK_RAWTP];
        __u64 user_stack[MAX_STACK_RAWTP];
        struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

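/* Callback for each sample drained from the perf ring buffer. Returns
 * LIBBPF_PERF_EVENT_ERROR on a bad stack trace, LIBBPF_PERF_EVENT_DONE
 * once all expected samples have been seen, and LIBBPF_PERF_EVENT_CONT
 * otherwise.
 */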
static int get_stack_print_output(void *data, int size)
{
        bool good_kern_stack = false, good_user_stack = false;
        const char *nonjit_func = "___bpf_prog_run";
        struct get_stack_trace_t *e = data;
        int i, num_stack;
        static __u64 cnt;
        struct ksym *ks;

        cnt++;

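        /* A truncated sample holds only an array of raw kernel addresses. */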
        if (size < sizeof(struct get_stack_trace_t)) {
                __u64 *raw_data = data;
                bool found = false;

                num_stack = size / sizeof(__u64);
                /* If jit is enabled, we do not have a good way to
                 * verify the sanity of the kernel stack. So we
                 * just assume it is good if the stack is not empty.
                 * This could be improved in the future.
                 */
                if (jit_enabled) {
                        found = num_stack > 0;
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(raw_data[i]);
                                if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
                                        found = true;
                                        break;
                                }
                        }
                }
                if (found) {
                        good_kern_stack = true;
                        good_user_stack = true;
                }
        } else {
                num_stack = e->kern_stack_size / sizeof(__u64);
                if (jit_enabled) {
                        good_kern_stack = num_stack > 0;
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(e->kern_stack[i]);
                                if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
                                        good_kern_stack = true;
                                        break;
                                }
                        }
                }
                if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
                        good_user_stack = true;
        }
        if (!good_kern_stack || !good_user_stack)
                return LIBBPF_PERF_EVENT_ERROR;

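        /* Stop polling once all MAX_CNT_RAWTP samples have been seen. */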
        if (cnt == MAX_CNT_RAWTP)
                return LIBBPF_PERF_EVENT_DONE;

        return LIBBPF_PERF_EVENT_CONT;
}

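/* Load the raw tracepoint program, attach it to sys_enter, point its
 * perf event map at a newly opened perf buffer, trigger a few syscalls,
 * and validate the stack traces collected for each sample.
 */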
void test_get_stack_raw_tp(void)
{
        const char *file = "./test_get_stack_rawtp.o";
        int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
        struct perf_event_attr attr = {};
        struct timespec tv = {0, 10};
        __u32 key = 0, duration = 0;
        struct bpf_object *obj;

        err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
                return;

        efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
        if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
        if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
                  perfmap_fd, errno))
                goto close_prog;

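        /* ksym_search() in the output callback needs kallsyms loaded. */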
        err = load_kallsyms();
        if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
                goto close_prog;

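        /* Open a software BPF_OUTPUT perf event for this task on any CPU. */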
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_BPF_OUTPUT;
        pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
                         -1/*group_fd*/, 0);
        if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
                  errno))
                goto close_prog;

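        /* Wire the perf event fd into slot 0 of the program's perf map. */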
        err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
        if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
                  errno))
                goto close_prog;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
        if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
                  err, errno))
                goto close_prog;

        err = perf_event_mmap(pmu_fd);
        if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
                goto close_prog;

        /* trigger some syscall action */
        for (i = 0; i < MAX_CNT_RAWTP; i++)
                nanosleep(&tv, NULL);

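        /* Drain the perf buffer, validating each sample in the callback. */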
        err = perf_event_poller(pmu_fd, get_stack_print_output);
        if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
                goto close_prog;

        goto close_prog_noerr;
close_prog:
        error_cnt++;
close_prog_noerr:
        bpf_object__close(obj);
}