/*
 * BPF verifier selftest entries, "reference tracking" suite.
 * NOTE(review): this is an excerpt — the enclosing struct braces, the
 * .insns = { ... } wrappers, BPF_EXIT_INSN() lines and .result fields are
 * elided from this view (the embedded line numbers jump), so each entry
 * below is shown partially. Do not edit instruction order without the
 * full file.
 *
 * Group: leak/zero a potential (possibly-NULL, refcounted) socket
 * reference obtained from a lookup helper. Every entry sets
 * .errstr = "Unreleased reference", i.e. the verifier is expected to
 * reject the program for losing track of the acquired reference.
 */
2 "reference tracking: leak potential reference",
4 BPF_SK_LOOKUP(sk_lookup_tcp),
5 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
8 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9 .errstr = "Unreleased reference",
/* Same leak, but the lookup returns a sock_common (skc_lookup_tcp). */
13 "reference tracking: leak potential reference to sock_common",
15 BPF_SK_LOOKUP(skc_lookup_tcp),
16 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
19 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
20 .errstr = "Unreleased reference",
/* Spill the unchecked reference to the stack (fp-8), then return 0. */
24 "reference tracking: leak potential reference on stack",
26 BPF_SK_LOOKUP(sk_lookup_tcp),
27 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
29 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
30 BPF_MOV64_IMM(BPF_REG_0, 0),
33 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
34 .errstr = "Unreleased reference",
/*
 * As above, but additionally overwrite the spilled slot with 0 —
 * zeroing the only copy still counts as a leak of the reference.
 */
38 "reference tracking: leak potential reference on stack 2",
40 BPF_SK_LOOKUP(sk_lookup_tcp),
41 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
42 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
43 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
44 BPF_MOV64_IMM(BPF_REG_0, 0),
45 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .errstr = "Unreleased reference",
/* Overwrite the only register holding the reference with 0. */
53 "reference tracking: zero potential reference",
55 BPF_SK_LOOKUP(sk_lookup_tcp),
56 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
59 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
60 .errstr = "Unreleased reference",
64 "reference tracking: zero potential reference to sock_common",
66 BPF_SK_LOOKUP(skc_lookup_tcp),
67 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
70 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
71 .errstr = "Unreleased reference",
/*
 * Copy the reference to r7, then zero both copies — no live register
 * or stack slot still tracks the acquired socket.
 */
75 "reference tracking: copy and zero potential references",
77 BPF_SK_LOOKUP(sk_lookup_tcp),
78 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
79 BPF_MOV64_IMM(BPF_REG_0, 0),
80 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
83 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
84 .errstr = "Unreleased reference",
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and .result fields are elided in this view).
 *
 * Group: releasing references. bpf_sk_release() requires a proven
 * non-NULL reference; releasing an unchecked or already-released
 * pointer must be rejected with the type-mismatch errstr shown.
 */
88 "reference tracking: release reference without check",
90 BPF_SK_LOOKUP(sk_lookup_tcp),
91 /* reference in r0 may be NULL */
92 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
93 BPF_MOV64_IMM(BPF_REG_2, 0),
94 BPF_EMIT_CALL(BPF_FUNC_sk_release),
97 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
98 .errstr = "type=sock_or_null expected=sock",
102 "reference tracking: release reference to sock_common without check",
104 BPF_SK_LOOKUP(skc_lookup_tcp),
105 /* reference in r0 may be NULL */
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_MOV64_IMM(BPF_REG_2, 0),
108 BPF_EMIT_CALL(BPF_FUNC_sk_release),
111 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
112 .errstr = "type=sock_common_or_null expected=sock",
/* Correct pattern: NULL-check (JEQ skips the release), then release. */
116 "reference tracking: release reference",
118 BPF_SK_LOOKUP(sk_lookup_tcp),
119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
120 BPF_JMP_IMM(BPF_JEQ, BPFF_REG_0, 0, 1),
121 BPF_EMIT_CALL(BPF_FUNC_sk_release),
124 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
128 "reference tracking: release reference to sock_common",
130 BPF_SK_LOOKUP(skc_lookup_tcp),
131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
132 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
133 BPF_EMIT_CALL(BPF_FUNC_sk_release),
136 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Inverted check: JNE jumps over an (elided) instruction to release. */
140 "reference tracking: release reference 2",
142 BPF_SK_LOOKUP(sk_lookup_tcp),
143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
144 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
146 BPF_EMIT_CALL(BPF_FUNC_sk_release),
149 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/*
 * Double release: after the first sk_release() the saved copy in r6 is
 * no longer a valid socket pointer, so the second release must fail.
 */
153 "reference tracking: release reference twice",
155 BPF_SK_LOOKUP(sk_lookup_tcp),
156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
157 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
159 BPF_EMIT_CALL(BPF_FUNC_sk_release),
160 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
161 BPF_EMIT_CALL(BPF_FUNC_sk_release),
164 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
165 .errstr = "type=inv expected=sock",
169 "reference tracking: release reference twice inside branch",
171 BPF_SK_LOOKUP(sk_lookup_tcp),
172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
173 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
175 BPF_EMIT_CALL(BPF_FUNC_sk_release),
176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
177 BPF_EMIT_CALL(BPF_FUNC_sk_release),
180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
181 .errstr = "type=inv expected=sock",
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: acquire a reference and release it in only one / in both
 * branches of a conditional. The packet-bounds preamble loads
 * skb->data / skb->data_end and bails out (elided exit) unless at
 * least 16 bytes are readable. F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
 * restricts the test to arches with efficient unaligned loads.
 */
185 "reference tracking: alloc, check, free in one subbranch",
187 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
188 offsetof(struct __sk_buff, data)),
189 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
190 offsetof(struct __sk_buff, data_end)),
191 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
193 /* if (offsetof(skb, mark) > data_len) exit; */
194 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
196 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
197 offsetof(struct __sk_buff, mark)),
198 BPF_SK_LOOKUP(sk_lookup_tcp),
199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
200 /* Leak reference in R0 */
202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
204 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* Only the mark==0 path releases; the other path leaks -> reject. */
207 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
208 .errstr = "Unreleased reference",
210 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Same structure, but both subbranches NULL-check and release the
 * socket, so this entry is expected to be accepted (no .errstr).
 */
213 "reference tracking: alloc, check, free in both subbranches",
215 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
216 offsetof(struct __sk_buff, data)),
217 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
218 offsetof(struct __sk_buff, data_end)),
219 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
221 /* if (offsetof(skb, mark) > data_len) exit; */
222 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
224 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
225 offsetof(struct __sk_buff, mark)),
226 BPF_SK_LOOKUP(sk_lookup_tcp),
227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
230 BPF_EMIT_CALL(BPF_FUNC_sk_release),
232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
234 BPF_EMIT_CALL(BPF_FUNC_sk_release),
237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
239 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: reference tracking across BPF-to-BPF calls. The
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, off) instructions are
 * pseudo-calls into a subprog that starts `off` instructions ahead.
 */
242 "reference tracking in call: free reference in subprog",
244 BPF_SK_LOOKUP(sk_lookup_tcp),
245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
246 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
247 BPF_MOV64_IMM(BPF_REG_0, 0),
/* subprog: NULL-check the passed pointer, then release it. */
251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
253 BPF_EMIT_CALL(BPF_FUNC_sk_release),
256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/*
 * Release in the subprog AND again in the caller — the second
 * release acts on a dead reference, hence the type-mismatch errstr.
 */
260 "reference tracking in call: free reference in subprog and outside",
262 BPF_SK_LOOKUP(sk_lookup_tcp),
263 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
264 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
266 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
267 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* subprog: NULL-check and release. */
271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
273 BPF_EMIT_CALL(BPF_FUNC_sk_release),
276 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
277 .errstr = "type=inv expected=sock",
/*
 * Subprog acquires a reference and spills it (unchecked) into the
 * caller's stack; nothing ever releases it -> "Unreleased reference".
 */
281 "reference tracking in call: alloc & leak reference in subprog",
283 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
287 BPF_MOV64_IMM(BPF_REG_0, 0),
/* subprog: r6 = caller stack slot passed in r4. */
291 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
292 BPF_SK_LOOKUP(sk_lookup_tcp),
293 /* spill unchecked sk_ptr into stack of caller */
294 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
298 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
299 .errstr = "Unreleased reference",
/*
 * Subprog acquires and returns the socket; caller NULL-checks and
 * releases it. Accepted; .retval documents the returned pointer value.
 */
303 "reference tracking in call: alloc in subprog, release outside",
305 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
309 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* subprog: look up and return the socket in r0. */
313 BPF_SK_LOOKUP(sk_lookup_tcp),
314 BPF_EXIT_INSN(), /* return sk */
316 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
317 .retval = POINTER_VALUE,
/*
 * Two-level call chain: the innermost subprog acquires the socket;
 * the middle subprog stores it through a pointer loaded from the
 * caller's stack, unchecked, and never releases -> leak.
 */
321 "reference tracking in call: sk_ptr leak into caller stack",
323 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
326 BPF_MOV64_IMM(BPF_REG_0, 0),
/* subprog 1: save caller's slot pointer at fp-8, call subprog 2. */
330 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
332 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
334 /* spill unchecked sk_ptr into stack of caller */
335 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
337 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
338 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
/* subprog 2: acquire the socket, return it in r0. */
342 BPF_SK_LOOKUP(sk_lookup_tcp),
345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
346 .errstr = "Unreleased reference",
/*
 * Same shape as the leak test, but the spilled pointer is NULL-checked
 * and released through the caller's stack slot -> accepted.
 */
350 "reference tracking in call: sk_ptr spill into caller stack",
352 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
355 BPF_MOV64_IMM(BPF_REG_0, 0),
/* subprog 1: save caller's slot pointer, call subprog 2. */
359 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
361 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
363 /* spill unchecked sk_ptr into stack of caller */
364 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
366 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
367 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
369 /* now the sk_ptr is verified, free the reference */
370 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
371 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* subprog 2: acquire the socket, return it in r0. */
375 BPF_SK_LOOKUP(sk_lookup_tcp),
378 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: legacy packet-access instructions (BPF_LD_ABS / BPF_LD_IND)
 * implicitly clobber registers and may let the program terminate, so
 * they are only legal when no socket reference is held. "allow" entries
 * release first (accepted); "forbid" entries hold the reference across
 * the load and must be rejected with the mixed-use errstr.
 */
382 "reference tracking: allow LD_ABS",
384 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
385 BPF_SK_LOOKUP(sk_lookup_tcp),
386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
388 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* reference released above; LD_ABS is now permitted */
389 BPF_LD_ABS(BPF_B, 0),
390 BPF_LD_ABS(BPF_H, 0),
391 BPF_LD_ABS(BPF_W, 0),
394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
398 "reference tracking: forbid LD_ABS while holding reference",
400 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
401 BPF_SK_LOOKUP(sk_lookup_tcp),
/* LD_ABS while the lookup reference is still live -> reject */
402 BPF_LD_ABS(BPF_B, 0),
403 BPF_LD_ABS(BPF_H, 0),
404 BPF_LD_ABS(BPF_W, 0),
405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
407 BPF_EMIT_CALL(BPF_FUNC_sk_release),
410 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
411 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
415 "reference tracking: allow LD_IND",
417 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
418 BPF_SK_LOOKUP(sk_lookup_tcp),
419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
420 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
421 BPF_EMIT_CALL(BPF_FUNC_sk_release),
422 BPF_MOV64_IMM(BPF_REG_7, 1),
423 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
424 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
427 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
432 "reference tracking: forbid LD_IND while holding reference",
434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
435 BPF_SK_LOOKUP(sk_lookup_tcp),
/* keep the reference alive in r4 across the LD_IND -> reject */
436 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
437 BPF_MOV64_IMM(BPF_REG_7, 1),
438 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
442 BPF_EMIT_CALL(BPF_FUNC_sk_release),
445 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
446 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: interaction with bpf_tail_call(). A tail call never returns,
 * so taking one while a reference is live would leak it. The
 * .fixup_prog1 index is presumably the instruction offset where the
 * test harness patches in the prog-array map fd (BPF_LD_MAP_FD) —
 * TODO confirm against the harness.
 */
450 "reference tracking: check reference or tail call",
452 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
453 BPF_SK_LOOKUP(sk_lookup_tcp),
454 /* if (sk) bpf_sk_release() */
455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
456 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
/* sk == NULL: no reference held, tail call is safe -> accepted */
457 /* bpf_tail_call() */
458 BPF_MOV64_IMM(BPF_REG_3, 2),
459 BPF_LD_MAP_FD(BPF_REG_2, 0),
460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
461 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
462 BPF_MOV64_IMM(BPF_REG_0, 0),
464 BPF_EMIT_CALL(BPF_FUNC_sk_release),
467 .fixup_prog1 = { 17 },
468 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Release first, then tail call: no live reference -> accepted. */
472 "reference tracking: release reference then tail call",
474 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
475 BPF_SK_LOOKUP(sk_lookup_tcp),
476 /* if (sk) bpf_sk_release() */
477 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
479 BPF_EMIT_CALL(BPF_FUNC_sk_release),
480 /* bpf_tail_call() */
481 BPF_MOV64_IMM(BPF_REG_3, 2),
482 BPF_LD_MAP_FD(BPF_REG_2, 0),
483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
485 BPF_MOV64_IMM(BPF_REG_0, 0),
488 .fixup_prog1 = { 18 },
489 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/*
 * Tail call while a possibly-non-NULL reference sits in r6: the
 * release after the tail call is unreachable on the tail-call path,
 * so the verifier must reject with the reference-leak errstr.
 */
493 "reference tracking: leak possible reference over tail call",
495 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
496 /* Look up socket and store in REG_6 */
497 BPF_SK_LOOKUP(sk_lookup_tcp),
498 /* bpf_tail_call() */
499 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
500 BPF_MOV64_IMM(BPF_REG_3, 2),
501 BPF_LD_MAP_FD(BPF_REG_2, 0),
502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505 /* if (sk) bpf_sk_release() */
506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
508 BPF_EMIT_CALL(BPF_FUNC_sk_release),
511 .fixup_prog1 = { 16 },
512 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
513 .errstr = "tail_call would lead to reference leak",
/* Even a checked (proven non-NULL) reference must not cross a tail call. */
517 "reference tracking: leak checked reference over tail call",
519 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
520 /* Look up socket and store in REG_6 */
521 BPF_SK_LOOKUP(sk_lookup_tcp),
522 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
523 /* if (!sk) goto end */
524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
525 /* bpf_tail_call() */
526 BPF_MOV64_IMM(BPF_REG_3, 0),
527 BPF_LD_MAP_FD(BPF_REG_2, 0),
528 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
530 BPF_MOV64_IMM(BPF_REG_0, 0),
531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
532 BPF_EMIT_CALL(BPF_FUNC_sk_release),
535 .fixup_prog1 = { 17 },
536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
537 .errstr = "tail_call would lead to reference leak",
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: pointer arithmetic on socket pointers is forbidden, reads of
 * sock members are restricted by offset/size, writes are forbidden,
 * and a released pointer must not be dereferenced.
 */
541 "reference tracking: mangle and release sock_or_null",
543 BPF_SK_LOOKUP(sk_lookup_tcp),
544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
/* arithmetic on the still-unchecked (maybe-NULL) pointer -> reject */
545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
546 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
547 BPF_EMIT_CALL(BPF_FUNC_sk_release),
550 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
551 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
/* Same, but after the NULL check, so the type in the errstr is "sock". */
555 "reference tracking: mangle and release sock",
557 BPF_SK_LOOKUP(sk_lookup_tcp),
558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
561 BPF_EMIT_CALL(BPF_FUNC_sk_release),
564 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
565 .errstr = "R1 pointer arithmetic on sock prohibited",
/* 4-byte read at offset 4 of the checked sock -> accepted. */
569 "reference tracking: access member",
571 BPF_SK_LOOKUP(sk_lookup_tcp),
572 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
574 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
576 BPF_EMIT_CALL(BPF_FUNC_sk_release),
579 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Stores into a sock returned by lookup are not allowed. */
583 "reference tracking: write to member",
585 BPF_SK_LOOKUP(sk_lookup_tcp),
586 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
589 BPF_LD_IMM64(BPF_REG_2, 42),
590 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
591 offsetof(struct bpf_sock, mark)),
592 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
593 BPF_EMIT_CALL(BPF_FUNC_sk_release),
594 BPF_LD_IMM64(BPF_REG_0, 0),
597 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
598 .errstr = "cannot write into sock",
/* 8-byte read at offset 0 exceeds the permitted field size -> reject. */
602 "reference tracking: invalid 64-bit access of member",
604 BPF_SK_LOOKUP(sk_lookup_tcp),
605 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
607 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
609 BPF_EMIT_CALL(BPF_FUNC_sk_release),
612 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
613 .errstr = "invalid sock access off=0 size=8",
/* Dereference of r1 after sk_release(r1): pointer is dead -> !read_ok. */
617 "reference tracking: access after release",
619 BPF_SK_LOOKUP(sk_lookup_tcp),
620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
622 BPF_EMIT_CALL(BPF_FUNC_sk_release),
623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
627 .errstr = "!read_ok",
/*
 * BPF verifier selftest entry (excerpt — struct braces, .insns wrapper,
 * BPF_EXIT_INSN() and .result field are elided in this view).
 *
 * Calls the sk_lookup_tcp helper directly with a tuple pointer taken
 * from packet data (r2 = skb->data) instead of stack memory, after
 * verifying the packet holds at least 64 bytes. Accepted: the socket
 * is NULL-checked, a member is read, and the reference is released.
 */
631 "reference tracking: direct access for lookup",
633 /* Check that the packet is at least 64B long */
634 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
635 offsetof(struct __sk_buff, data)),
636 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
637 offsetof(struct __sk_buff, data_end)),
638 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
640 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
641 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
642 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
643 BPF_MOV64_IMM(BPF_REG_4, 0),
644 BPF_MOV64_IMM(BPF_REG_5, 0),
645 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
646 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
648 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
650 BPF_EMIT_CALL(BPF_FUNC_sk_release),
653 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/*
 * BPF verifier selftest entries (excerpt — struct braces, .insns
 * wrappers, BPF_EXIT_INSN() and some .result fields are elided here).
 *
 * Group: pointers derived from a referenced socket via bpf_tcp_sock(),
 * bpf_sk_fullsock() or bpf_get_listener_sock() are only valid while
 * the parent reference is held; releasing the parent invalidates them
 * ("invalid mem access"), and releasing a derived, non-refcounted
 * pointer itself is rejected ("reference has not been acquired").
 */
657 "reference tracking: use ptr from bpf_tcp_sock() after release",
659 BPF_SK_LOOKUP(sk_lookup_tcp),
660 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
662 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
664 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
665 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
667 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* r7 = tcp_sock ptr; parent released below, then r7 dereferenced. */
669 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
670 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
671 BPF_EMIT_CALL(BPF_FUNC_sk_release),
672 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
677 .errstr = "invalid mem access",
680 "reference tracking: use ptr from bpf_sk_fullsock() after release",
682 BPF_SK_LOOKUP(sk_lookup_tcp),
683 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
685 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
687 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
688 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
690 BPF_EMIT_CALL(BPF_FUNC_sk_release),
/* r7 = fullsock ptr; invalid once the parent in r6 is released. */
692 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
694 BPF_EMIT_CALL(BPF_FUNC_sk_release),
695 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
698 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
700 .errstr = "invalid mem access",
/* Derived-of-derived: fullsock taken from tcp_sock, used after release. */
703 "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
705 BPF_SK_LOOKUP(sk_lookup_tcp),
706 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
708 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
710 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
711 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
713 BPF_EMIT_CALL(BPF_FUNC_sk_release),
715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
716 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
717 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
718 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
719 BPF_EMIT_CALL(BPF_FUNC_sk_release),
720 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
722 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
725 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
727 .errstr = "invalid mem access",
/* Reading through the parent sk (r6) after releasing it. */
730 "reference tracking: use sk after bpf_sk_release(tp)",
732 BPF_SK_LOOKUP(sk_lookup_tcp),
733 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
735 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
737 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
738 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
739 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
740 BPF_EMIT_CALL(BPF_FUNC_sk_release),
742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
743 BPF_EMIT_CALL(BPF_FUNC_sk_release),
744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
747 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
749 .errstr = "invalid mem access",
/*
 * Listener sock is read AFTER releasing the parent reference; this
 * entry has no errstr on the read path visible here — presumably
 * accepted because bpf_get_listener_sock() usage rules differ; the
 * elided .result field would confirm. TODO verify against full file.
 */
752 "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
754 BPF_SK_LOOKUP(sk_lookup_tcp),
755 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
757 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
758 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
759 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
760 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
762 BPF_EMIT_CALL(BPF_FUNC_sk_release),
764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
765 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
766 BPF_EMIT_CALL(BPF_FUNC_sk_release),
767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
770 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Releasing the derived listener sock itself: it was never acquired. */
774 "reference tracking: bpf_sk_release(listen_sk)",
776 BPF_SK_LOOKUP(sk_lookup_tcp),
777 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
779 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
780 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
781 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
782 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
784 BPF_EMIT_CALL(BPF_FUNC_sk_release),
786 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
787 BPF_EMIT_CALL(BPF_FUNC_sk_release),
788 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
789 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
790 BPF_EMIT_CALL(BPF_FUNC_sk_release),
793 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
795 .errstr = "reference has not been acquired before",
798 /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
799 "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
801 BPF_SK_LOOKUP(sk_lookup_tcp),
802 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
806 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
807 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
809 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
/* r8 = tcp_sock result, NEVER NULL-checked; only r7 (fullsock) is. */
810 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
811 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
812 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
813 BPF_EMIT_CALL(BPF_FUNC_sk_release),
815 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
816 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
817 BPF_EMIT_CALL(BPF_FUNC_sk_release),
820 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
822 .errstr = "invalid mem access",