/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns[MAX_INSNS];
58 int fixup_map1[MAX_FIXUPS];
59 int fixup_map2[MAX_FIXUPS];
60 int fixup_prog[MAX_FIXUPS];
61 int fixup_map_in_map[MAX_FIXUPS];
63 const char *errstr_unpriv;
68 } result, result_unpriv;
69 enum bpf_prog_type prog_type;
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests[] = {
87 BPF_MOV64_IMM(BPF_REG_1, 1),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 BPF_MOV64_IMM(BPF_REG_2, 3),
90 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
104 .errstr = "unreachable",
110 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
114 .errstr = "unreachable",
120 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
123 .errstr = "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
132 .errstr = "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 0),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_LD_IMM64(BPF_REG_0, 1),
143 BPF_MOV64_IMM(BPF_REG_0, 2),
146 .errstr = "invalid BPF_LD_IMM insn",
147 .errstr_unpriv = "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 0),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_LD_IMM64(BPF_REG_0, 1),
160 .errstr = "invalid BPF_LD_IMM insn",
161 .errstr_unpriv = "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 0),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_LD_IMM64(BPF_REG_0, 1),
175 .errstr = "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
184 .errstr = "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
192 .errstr = "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr = "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr = "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
240 .errstr = "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
250 .errstr = "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1, 0),
257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr = "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1, 0),
268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
272 .errstr = "invalid bpf_ld_imm64 insn",
278 BPF_MOV64_IMM(BPF_REG_0, 1),
279 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
283 .errstr = "BPF_ARSH not supported for 32 bit ALU",
288 BPF_MOV64_IMM(BPF_REG_0, 1),
289 BPF_MOV64_IMM(BPF_REG_1, 5),
290 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
294 .errstr = "BPF_ARSH not supported for 32 bit ALU",
299 BPF_MOV64_IMM(BPF_REG_0, 1),
300 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
308 BPF_MOV64_IMM(BPF_REG_0, 1),
309 BPF_MOV64_IMM(BPF_REG_1, 5),
310 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
318 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
320 .errstr = "jump out of range",
326 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
329 .errstr = "back-edge",
335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
336 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
337 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
338 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
341 .errstr = "back-edge",
347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
348 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
349 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
350 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
353 .errstr = "back-edge",
357 "read uninitialized register",
359 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
362 .errstr = "R2 !read_ok",
366 "read invalid register",
368 BPF_MOV64_REG(BPF_REG_0, -1),
371 .errstr = "R15 is invalid",
375 "program doesn't init R0 before exit",
377 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
380 .errstr = "R0 !read_ok",
384 "program doesn't init R0 before exit in all branches",
386 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
387 BPF_MOV64_IMM(BPF_REG_0, 1),
388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
391 .errstr = "R0 !read_ok",
392 .errstr_unpriv = "R1 pointer comparison",
396 "stack out of bounds",
398 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
401 .errstr = "invalid stack",
405 "invalid call insn1",
407 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
410 .errstr = "BPF_CALL uses reserved",
414 "invalid call insn2",
416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
419 .errstr = "BPF_CALL uses reserved",
423 "invalid function call",
425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
428 .errstr = "invalid func unknown#1234567",
432 "uninitialized stack1",
434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
436 BPF_LD_MAP_FD(BPF_REG_1, 0),
437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
438 BPF_FUNC_map_lookup_elem),
442 .errstr = "invalid indirect read from stack",
446 "uninitialized stack2",
448 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
449 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
452 .errstr = "invalid read from stack",
456 "invalid fp arithmetic",
457 /* If this gets ever changed, make sure JITs can deal with it. */
459 BPF_MOV64_IMM(BPF_REG_0, 0),
460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
461 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
462 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
465 .errstr_unpriv = "R1 subtraction from stack pointer",
466 .result_unpriv = REJECT,
467 .errstr = "R1 invalid mem access",
471 "non-invalid fp arithmetic",
473 BPF_MOV64_IMM(BPF_REG_0, 0),
474 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
480 "invalid argument register",
482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
483 BPF_FUNC_get_cgroup_classid),
484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
485 BPF_FUNC_get_cgroup_classid),
488 .errstr = "R1 !read_ok",
490 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
493 "non-invalid argument register",
495 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
496 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
497 BPF_FUNC_get_cgroup_classid),
498 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
499 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
500 BPF_FUNC_get_cgroup_classid),
504 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
507 "check valid spill/fill",
509 /* spill R1(ctx) into stack */
510 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
511 /* fill it back into R2 */
512 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
513 /* should be able to access R0 = *(R2 + 8) */
514 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
515 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
518 .errstr_unpriv = "R0 leaks addr",
520 .result_unpriv = REJECT,
523 "check valid spill/fill, skb mark",
525 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
526 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
527 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
528 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
529 offsetof(struct __sk_buff, mark)),
533 .result_unpriv = ACCEPT,
536 "check corrupted spill/fill",
538 /* spill R1(ctx) into stack */
539 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
540 /* mess up with R1 pointer on stack */
541 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
542 /* fill back into R0 should fail */
543 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
546 .errstr_unpriv = "attempt to corrupt spilled",
547 .errstr = "corrupted spill",
551 "invalid src register in STX",
553 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
556 .errstr = "R15 is invalid",
560 "invalid dst register in STX",
562 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
565 .errstr = "R14 is invalid",
569 "invalid dst register in ST",
571 BPF_ST_MEM(BPF_B, 14, -1, -1),
574 .errstr = "R14 is invalid",
578 "invalid src register in LDX",
580 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
583 .errstr = "R12 is invalid",
587 "invalid dst register in LDX",
589 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
592 .errstr = "R11 is invalid",
598 BPF_RAW_INSN(0, 0, 0, 0, 0),
601 .errstr = "invalid BPF_LD_IMM",
607 BPF_RAW_INSN(1, 0, 0, 0, 0),
610 .errstr = "BPF_LDX uses reserved fields",
616 BPF_RAW_INSN(-1, 0, 0, 0, 0),
619 .errstr = "invalid BPF_ALU opcode f0",
625 BPF_RAW_INSN(-1, -1, -1, -1, -1),
628 .errstr = "invalid BPF_ALU opcode f0",
634 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
637 .errstr = "BPF_ALU uses reserved fields",
641 "misaligned read from stack",
643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
644 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
647 .errstr = "misaligned stack access",
651 "invalid map_fd for function call",
653 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
654 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
656 BPF_LD_MAP_FD(BPF_REG_1, 0),
657 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
658 BPF_FUNC_map_delete_elem),
661 .errstr = "fd 0 is not pointing to valid bpf_map",
665 "don't check return value before access",
667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
670 BPF_LD_MAP_FD(BPF_REG_1, 0),
671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
672 BPF_FUNC_map_lookup_elem),
673 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
677 .errstr = "R0 invalid mem access 'map_value_or_null'",
681 "access memory with incorrect alignment",
683 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
686 BPF_LD_MAP_FD(BPF_REG_1, 0),
687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
688 BPF_FUNC_map_lookup_elem),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
690 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
694 .errstr = "misaligned value access",
696 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
699 "sometimes access memory with incorrect alignment",
701 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
702 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
704 BPF_LD_MAP_FD(BPF_REG_1, 0),
705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
706 BPF_FUNC_map_lookup_elem),
707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
710 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
714 .errstr = "R0 invalid mem access",
715 .errstr_unpriv = "R0 leaks addr",
717 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
723 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
725 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
726 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
727 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
728 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
729 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
731 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
733 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
735 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
736 BPF_MOV64_IMM(BPF_REG_0, 0),
739 .errstr_unpriv = "R1 pointer comparison",
740 .result_unpriv = REJECT,
746 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
748 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
749 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
750 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
751 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
752 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
753 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
754 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
755 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
757 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
758 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
760 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
761 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
763 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
764 BPF_MOV64_IMM(BPF_REG_0, 0),
767 .errstr_unpriv = "R1 pointer comparison",
768 .result_unpriv = REJECT,
774 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
776 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
778 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
780 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
782 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
784 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
786 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
788 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
790 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
792 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
794 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
796 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
798 BPF_LD_MAP_FD(BPF_REG_1, 0),
799 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
800 BPF_FUNC_map_delete_elem),
803 .fixup_map1 = { 24 },
804 .errstr_unpriv = "R1 pointer comparison",
805 .result_unpriv = REJECT,
811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
812 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
815 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
818 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
820 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
823 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
825 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
829 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
832 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
833 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
837 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
839 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
845 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
846 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
850 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
851 BPF_MOV64_IMM(BPF_REG_0, 0),
854 .errstr_unpriv = "R1 pointer comparison",
855 .result_unpriv = REJECT,
861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
862 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
863 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
864 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
865 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
866 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
867 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
868 BPF_MOV64_IMM(BPF_REG_0, 0),
869 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
870 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
871 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
872 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
873 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
874 BPF_MOV64_IMM(BPF_REG_0, 0),
875 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
876 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
877 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
878 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
879 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
880 BPF_MOV64_IMM(BPF_REG_0, 0),
881 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
882 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
883 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
884 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
885 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
886 BPF_MOV64_IMM(BPF_REG_0, 0),
887 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
888 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
889 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
890 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
891 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
892 BPF_MOV64_IMM(BPF_REG_0, 0),
895 .errstr_unpriv = "R1 pointer comparison",
896 .result_unpriv = REJECT,
900 "access skb fields ok",
902 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
903 offsetof(struct __sk_buff, len)),
904 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
905 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
906 offsetof(struct __sk_buff, mark)),
907 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, pkt_type)),
910 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
911 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
912 offsetof(struct __sk_buff, queue_mapping)),
913 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
914 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
915 offsetof(struct __sk_buff, protocol)),
916 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
917 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
918 offsetof(struct __sk_buff, vlan_present)),
919 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
920 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
921 offsetof(struct __sk_buff, vlan_tci)),
922 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
923 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
924 offsetof(struct __sk_buff, napi_id)),
925 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
931 "access skb fields bad1",
933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
936 .errstr = "invalid bpf_context access",
940 "access skb fields bad2",
942 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
943 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
946 BPF_LD_MAP_FD(BPF_REG_1, 0),
947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
948 BPF_FUNC_map_lookup_elem),
949 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
952 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
953 offsetof(struct __sk_buff, pkt_type)),
957 .errstr = "different pointers",
958 .errstr_unpriv = "R1 pointer comparison",
962 "access skb fields bad3",
964 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
965 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
966 offsetof(struct __sk_buff, pkt_type)),
968 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
971 BPF_LD_MAP_FD(BPF_REG_1, 0),
972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
973 BPF_FUNC_map_lookup_elem),
974 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
977 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
980 .errstr = "different pointers",
981 .errstr_unpriv = "R1 pointer comparison",
985 "access skb fields bad4",
987 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
988 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
989 offsetof(struct __sk_buff, len)),
990 BPF_MOV64_IMM(BPF_REG_0, 0),
992 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
993 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
995 BPF_LD_MAP_FD(BPF_REG_1, 0),
996 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
997 BPF_FUNC_map_lookup_elem),
998 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1001 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1003 .fixup_map1 = { 7 },
1004 .errstr = "different pointers",
1005 .errstr_unpriv = "R1 pointer comparison",
1009 "invalid access __sk_buff family",
1011 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1012 offsetof(struct __sk_buff, family)),
1015 .errstr = "invalid bpf_context access",
1019 "invalid access __sk_buff remote_ip4",
1021 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1022 offsetof(struct __sk_buff, remote_ip4)),
1025 .errstr = "invalid bpf_context access",
1029 "invalid access __sk_buff local_ip4",
1031 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1032 offsetof(struct __sk_buff, local_ip4)),
1035 .errstr = "invalid bpf_context access",
1039 "invalid access __sk_buff remote_ip6",
1041 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, remote_ip6)),
1045 .errstr = "invalid bpf_context access",
1049 "invalid access __sk_buff local_ip6",
1051 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, local_ip6)),
1055 .errstr = "invalid bpf_context access",
1059 "invalid access __sk_buff remote_port",
1061 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, remote_port)),
1065 .errstr = "invalid bpf_context access",
1069 "invalid access __sk_buff remote_port",
1071 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1072 offsetof(struct __sk_buff, local_port)),
1075 .errstr = "invalid bpf_context access",
1079 "valid access __sk_buff family",
1081 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1082 offsetof(struct __sk_buff, family)),
1086 .prog_type = BPF_PROG_TYPE_SK_SKB,
1089 "valid access __sk_buff remote_ip4",
1091 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1092 offsetof(struct __sk_buff, remote_ip4)),
1096 .prog_type = BPF_PROG_TYPE_SK_SKB,
1099 "valid access __sk_buff local_ip4",
1101 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1102 offsetof(struct __sk_buff, local_ip4)),
1106 .prog_type = BPF_PROG_TYPE_SK_SKB,
1109 "valid access __sk_buff remote_ip6",
1111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1112 offsetof(struct __sk_buff, remote_ip6[0])),
1113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1114 offsetof(struct __sk_buff, remote_ip6[1])),
1115 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1116 offsetof(struct __sk_buff, remote_ip6[2])),
1117 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1118 offsetof(struct __sk_buff, remote_ip6[3])),
1122 .prog_type = BPF_PROG_TYPE_SK_SKB,
1125 "valid access __sk_buff local_ip6",
1127 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1128 offsetof(struct __sk_buff, local_ip6[0])),
1129 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1130 offsetof(struct __sk_buff, local_ip6[1])),
1131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1132 offsetof(struct __sk_buff, local_ip6[2])),
1133 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1134 offsetof(struct __sk_buff, local_ip6[3])),
1138 .prog_type = BPF_PROG_TYPE_SK_SKB,
1141 "valid access __sk_buff remote_port",
1143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1144 offsetof(struct __sk_buff, remote_port)),
1148 .prog_type = BPF_PROG_TYPE_SK_SKB,
1151 "valid access __sk_buff remote_port",
1153 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1154 offsetof(struct __sk_buff, local_port)),
1158 .prog_type = BPF_PROG_TYPE_SK_SKB,
1161 "invalid access of tc_classid for SK_SKB",
1163 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1164 offsetof(struct __sk_buff, tc_classid)),
1168 .prog_type = BPF_PROG_TYPE_SK_SKB,
1169 .errstr = "invalid bpf_context access",
1172 "invalid access of skb->mark for SK_SKB",
1174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1175 offsetof(struct __sk_buff, mark)),
1179 .prog_type = BPF_PROG_TYPE_SK_SKB,
1180 .errstr = "invalid bpf_context access",
1183 "check skb->mark is not writeable by SK_SKB",
1185 BPF_MOV64_IMM(BPF_REG_0, 0),
1186 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1187 offsetof(struct __sk_buff, mark)),
1191 .prog_type = BPF_PROG_TYPE_SK_SKB,
1192 .errstr = "invalid bpf_context access",
1195 "check skb->tc_index is writeable by SK_SKB",
1197 BPF_MOV64_IMM(BPF_REG_0, 0),
1198 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1199 offsetof(struct __sk_buff, tc_index)),
1203 .prog_type = BPF_PROG_TYPE_SK_SKB,
1206 "check skb->priority is writeable by SK_SKB",
1208 BPF_MOV64_IMM(BPF_REG_0, 0),
1209 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1210 offsetof(struct __sk_buff, priority)),
1214 .prog_type = BPF_PROG_TYPE_SK_SKB,
1217 "direct packet read for SK_SKB",
1219 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1220 offsetof(struct __sk_buff, data)),
1221 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1222 offsetof(struct __sk_buff, data_end)),
1223 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1225 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1226 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1227 BPF_MOV64_IMM(BPF_REG_0, 0),
1231 .prog_type = BPF_PROG_TYPE_SK_SKB,
1234 "direct packet write for SK_SKB",
1236 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1237 offsetof(struct __sk_buff, data)),
1238 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1239 offsetof(struct __sk_buff, data_end)),
1240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1242 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1243 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1244 BPF_MOV64_IMM(BPF_REG_0, 0),
1248 .prog_type = BPF_PROG_TYPE_SK_SKB,
1251 "overlapping checks for direct packet access SK_SKB",
1253 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1254 offsetof(struct __sk_buff, data)),
1255 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1256 offsetof(struct __sk_buff, data_end)),
1257 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1259 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1260 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1262 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1263 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1264 BPF_MOV64_IMM(BPF_REG_0, 0),
1268 .prog_type = BPF_PROG_TYPE_SK_SKB,
1271 "check skb->mark is not writeable by sockets",
1273 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1274 offsetof(struct __sk_buff, mark)),
1277 .errstr = "invalid bpf_context access",
1278 .errstr_unpriv = "R1 leaks addr",
1282 "check skb->tc_index is not writeable by sockets",
1284 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1285 offsetof(struct __sk_buff, tc_index)),
1288 .errstr = "invalid bpf_context access",
1289 .errstr_unpriv = "R1 leaks addr",
1293 "check cb access: byte",
1295 BPF_MOV64_IMM(BPF_REG_0, 0),
1296 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1297 offsetof(struct __sk_buff, cb[0])),
1298 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1299 offsetof(struct __sk_buff, cb[0]) + 1),
1300 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1301 offsetof(struct __sk_buff, cb[0]) + 2),
1302 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1303 offsetof(struct __sk_buff, cb[0]) + 3),
1304 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1305 offsetof(struct __sk_buff, cb[1])),
1306 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1307 offsetof(struct __sk_buff, cb[1]) + 1),
1308 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1309 offsetof(struct __sk_buff, cb[1]) + 2),
1310 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1311 offsetof(struct __sk_buff, cb[1]) + 3),
1312 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1313 offsetof(struct __sk_buff, cb[2])),
1314 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1315 offsetof(struct __sk_buff, cb[2]) + 1),
1316 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1317 offsetof(struct __sk_buff, cb[2]) + 2),
1318 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1319 offsetof(struct __sk_buff, cb[2]) + 3),
1320 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1321 offsetof(struct __sk_buff, cb[3])),
1322 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1323 offsetof(struct __sk_buff, cb[3]) + 1),
1324 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1325 offsetof(struct __sk_buff, cb[3]) + 2),
1326 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1327 offsetof(struct __sk_buff, cb[3]) + 3),
1328 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1329 offsetof(struct __sk_buff, cb[4])),
1330 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1331 offsetof(struct __sk_buff, cb[4]) + 1),
1332 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1333 offsetof(struct __sk_buff, cb[4]) + 2),
1334 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1335 offsetof(struct __sk_buff, cb[4]) + 3),
1336 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1337 offsetof(struct __sk_buff, cb[0])),
1338 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, cb[0]) + 1),
1340 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1341 offsetof(struct __sk_buff, cb[0]) + 2),
1342 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1343 offsetof(struct __sk_buff, cb[0]) + 3),
1344 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1345 offsetof(struct __sk_buff, cb[1])),
1346 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1347 offsetof(struct __sk_buff, cb[1]) + 1),
1348 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, cb[1]) + 2),
1350 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1351 offsetof(struct __sk_buff, cb[1]) + 3),
1352 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1353 offsetof(struct __sk_buff, cb[2])),
1354 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1355 offsetof(struct __sk_buff, cb[2]) + 1),
1356 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1357 offsetof(struct __sk_buff, cb[2]) + 2),
1358 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, cb[2]) + 3),
1360 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, cb[3])),
1362 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1363 offsetof(struct __sk_buff, cb[3]) + 1),
1364 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1365 offsetof(struct __sk_buff, cb[3]) + 2),
1366 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1367 offsetof(struct __sk_buff, cb[3]) + 3),
1368 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, cb[4])),
1370 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1371 offsetof(struct __sk_buff, cb[4]) + 1),
1372 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1373 offsetof(struct __sk_buff, cb[4]) + 2),
1374 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1375 offsetof(struct __sk_buff, cb[4]) + 3),
1381 "__sk_buff->hash, offset 0, byte store not permitted",
1383 BPF_MOV64_IMM(BPF_REG_0, 0),
1384 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1385 offsetof(struct __sk_buff, hash)),
1388 .errstr = "invalid bpf_context access",
1392 "__sk_buff->tc_index, offset 3, byte store not permitted",
1394 BPF_MOV64_IMM(BPF_REG_0, 0),
1395 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1396 offsetof(struct __sk_buff, tc_index) + 3),
1399 .errstr = "invalid bpf_context access",
1403 "check skb->hash byte load permitted",
1405 BPF_MOV64_IMM(BPF_REG_0, 0),
1406 #if __BYTE_ORDER == __LITTLE_ENDIAN
1407 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1408 offsetof(struct __sk_buff, hash)),
1410 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1411 offsetof(struct __sk_buff, hash) + 3),
1418 "check skb->hash byte load not permitted 1",
1420 BPF_MOV64_IMM(BPF_REG_0, 0),
1421 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1422 offsetof(struct __sk_buff, hash) + 1),
1425 .errstr = "invalid bpf_context access",
1429 "check skb->hash byte load not permitted 2",
1431 BPF_MOV64_IMM(BPF_REG_0, 0),
1432 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1433 offsetof(struct __sk_buff, hash) + 2),
1436 .errstr = "invalid bpf_context access",
1440 "check skb->hash byte load not permitted 3",
1442 BPF_MOV64_IMM(BPF_REG_0, 0),
1443 #if __BYTE_ORDER == __LITTLE_ENDIAN
1444 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, hash) + 3),
1447 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1448 offsetof(struct __sk_buff, hash)),
1452 .errstr = "invalid bpf_context access",
1456 "check cb access: byte, wrong type",
1458 BPF_MOV64_IMM(BPF_REG_0, 0),
1459 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1460 offsetof(struct __sk_buff, cb[0])),
1463 .errstr = "invalid bpf_context access",
1465 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1468 "check cb access: half",
1470 BPF_MOV64_IMM(BPF_REG_0, 0),
1471 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1472 offsetof(struct __sk_buff, cb[0])),
1473 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1474 offsetof(struct __sk_buff, cb[0]) + 2),
1475 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1476 offsetof(struct __sk_buff, cb[1])),
1477 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1478 offsetof(struct __sk_buff, cb[1]) + 2),
1479 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1480 offsetof(struct __sk_buff, cb[2])),
1481 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1482 offsetof(struct __sk_buff, cb[2]) + 2),
1483 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1484 offsetof(struct __sk_buff, cb[3])),
1485 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1486 offsetof(struct __sk_buff, cb[3]) + 2),
1487 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1488 offsetof(struct __sk_buff, cb[4])),
1489 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1490 offsetof(struct __sk_buff, cb[4]) + 2),
1491 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1492 offsetof(struct __sk_buff, cb[0])),
1493 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1494 offsetof(struct __sk_buff, cb[0]) + 2),
1495 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1496 offsetof(struct __sk_buff, cb[1])),
1497 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1498 offsetof(struct __sk_buff, cb[1]) + 2),
1499 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1500 offsetof(struct __sk_buff, cb[2])),
1501 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1502 offsetof(struct __sk_buff, cb[2]) + 2),
1503 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1504 offsetof(struct __sk_buff, cb[3])),
1505 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1506 offsetof(struct __sk_buff, cb[3]) + 2),
1507 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1508 offsetof(struct __sk_buff, cb[4])),
1509 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1510 offsetof(struct __sk_buff, cb[4]) + 2),
1516 "check cb access: half, unaligned",
1518 BPF_MOV64_IMM(BPF_REG_0, 0),
1519 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1520 offsetof(struct __sk_buff, cb[0]) + 1),
1523 .errstr = "misaligned context access",
1525 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1528 "check __sk_buff->hash, offset 0, half store not permitted",
1530 BPF_MOV64_IMM(BPF_REG_0, 0),
1531 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1532 offsetof(struct __sk_buff, hash)),
1535 .errstr = "invalid bpf_context access",
1539 "check __sk_buff->tc_index, offset 2, half store not permitted",
1541 BPF_MOV64_IMM(BPF_REG_0, 0),
1542 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1543 offsetof(struct __sk_buff, tc_index) + 2),
1546 .errstr = "invalid bpf_context access",
1550 "check skb->hash half load permitted",
1552 BPF_MOV64_IMM(BPF_REG_0, 0),
1553 #if __BYTE_ORDER == __LITTLE_ENDIAN
1554 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1555 offsetof(struct __sk_buff, hash)),
1557 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1558 offsetof(struct __sk_buff, hash) + 2),
1565 "check skb->hash half load not permitted",
1567 BPF_MOV64_IMM(BPF_REG_0, 0),
1568 #if __BYTE_ORDER == __LITTLE_ENDIAN
1569 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1570 offsetof(struct __sk_buff, hash) + 2),
1572 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1573 offsetof(struct __sk_buff, hash)),
1577 .errstr = "invalid bpf_context access",
1581 "check cb access: half, wrong type",
1583 BPF_MOV64_IMM(BPF_REG_0, 0),
1584 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1585 offsetof(struct __sk_buff, cb[0])),
1588 .errstr = "invalid bpf_context access",
1590 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1593 "check cb access: word",
1595 BPF_MOV64_IMM(BPF_REG_0, 0),
1596 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1597 offsetof(struct __sk_buff, cb[0])),
1598 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1599 offsetof(struct __sk_buff, cb[1])),
1600 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1601 offsetof(struct __sk_buff, cb[2])),
1602 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1603 offsetof(struct __sk_buff, cb[3])),
1604 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1605 offsetof(struct __sk_buff, cb[4])),
1606 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1607 offsetof(struct __sk_buff, cb[0])),
1608 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1609 offsetof(struct __sk_buff, cb[1])),
1610 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1611 offsetof(struct __sk_buff, cb[2])),
1612 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1613 offsetof(struct __sk_buff, cb[3])),
1614 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1615 offsetof(struct __sk_buff, cb[4])),
1621 "check cb access: word, unaligned 1",
1623 BPF_MOV64_IMM(BPF_REG_0, 0),
1624 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1625 offsetof(struct __sk_buff, cb[0]) + 2),
1628 .errstr = "misaligned context access",
1630 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1633 "check cb access: word, unaligned 2",
1635 BPF_MOV64_IMM(BPF_REG_0, 0),
1636 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1637 offsetof(struct __sk_buff, cb[4]) + 1),
1640 .errstr = "misaligned context access",
1642 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1645 "check cb access: word, unaligned 3",
1647 BPF_MOV64_IMM(BPF_REG_0, 0),
1648 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1649 offsetof(struct __sk_buff, cb[4]) + 2),
1652 .errstr = "misaligned context access",
1654 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1657 "check cb access: word, unaligned 4",
1659 BPF_MOV64_IMM(BPF_REG_0, 0),
1660 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1661 offsetof(struct __sk_buff, cb[4]) + 3),
1664 .errstr = "misaligned context access",
1666 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1669 "check cb access: double",
1671 BPF_MOV64_IMM(BPF_REG_0, 0),
1672 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1673 offsetof(struct __sk_buff, cb[0])),
1674 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1675 offsetof(struct __sk_buff, cb[2])),
1676 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1677 offsetof(struct __sk_buff, cb[0])),
1678 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1679 offsetof(struct __sk_buff, cb[2])),
1685 "check cb access: double, unaligned 1",
1687 BPF_MOV64_IMM(BPF_REG_0, 0),
1688 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1689 offsetof(struct __sk_buff, cb[1])),
1692 .errstr = "misaligned context access",
1694 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1697 "check cb access: double, unaligned 2",
1699 BPF_MOV64_IMM(BPF_REG_0, 0),
1700 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1701 offsetof(struct __sk_buff, cb[3])),
1704 .errstr = "misaligned context access",
1706 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1709 "check cb access: double, oob 1",
1711 BPF_MOV64_IMM(BPF_REG_0, 0),
1712 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1713 offsetof(struct __sk_buff, cb[4])),
1716 .errstr = "invalid bpf_context access",
1720 "check cb access: double, oob 2",
1722 BPF_MOV64_IMM(BPF_REG_0, 0),
1723 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1724 offsetof(struct __sk_buff, cb[4])),
1727 .errstr = "invalid bpf_context access",
1731 "check __sk_buff->ifindex dw store not permitted",
1733 BPF_MOV64_IMM(BPF_REG_0, 0),
1734 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1735 offsetof(struct __sk_buff, ifindex)),
1738 .errstr = "invalid bpf_context access",
1742 "check __sk_buff->ifindex dw load not permitted",
1744 BPF_MOV64_IMM(BPF_REG_0, 0),
1745 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1746 offsetof(struct __sk_buff, ifindex)),
1749 .errstr = "invalid bpf_context access",
1753 "check cb access: double, wrong type",
1755 BPF_MOV64_IMM(BPF_REG_0, 0),
1756 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1757 offsetof(struct __sk_buff, cb[0])),
1760 .errstr = "invalid bpf_context access",
1762 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1765 "check out of range skb->cb access",
1767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 offsetof(struct __sk_buff, cb[0]) + 256),
1771 .errstr = "invalid bpf_context access",
1772 .errstr_unpriv = "",
1774 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1777 "write skb fields from socket prog",
1779 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1780 offsetof(struct __sk_buff, cb[4])),
1781 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1782 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1783 offsetof(struct __sk_buff, mark)),
1784 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1785 offsetof(struct __sk_buff, tc_index)),
1786 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1787 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1788 offsetof(struct __sk_buff, cb[0])),
1789 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1790 offsetof(struct __sk_buff, cb[2])),
1794 .errstr_unpriv = "R1 leaks addr",
1795 .result_unpriv = REJECT,
1798 "write skb fields from tc_cls_act prog",
1800 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1801 offsetof(struct __sk_buff, cb[0])),
1802 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1803 offsetof(struct __sk_buff, mark)),
1804 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1805 offsetof(struct __sk_buff, tc_index)),
1806 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, tc_index)),
1808 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1809 offsetof(struct __sk_buff, cb[3])),
1812 .errstr_unpriv = "",
1813 .result_unpriv = REJECT,
1815 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1818 "PTR_TO_STACK store/load",
1820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1822 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1823 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1829 "PTR_TO_STACK store/load - bad alignment on off",
1831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1833 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1834 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1838 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1841 "PTR_TO_STACK store/load - bad alignment on reg",
1843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1845 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1846 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1850 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1853 "PTR_TO_STACK store/load - out of bounds low",
1855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1857 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1858 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1862 .errstr = "invalid stack off=-79992 size=8",
1863 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
1866 "PTR_TO_STACK store/load - out of bounds high",
1868 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1870 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1871 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1875 .errstr = "invalid stack off=0 size=8",
1878 "unpriv: return pointer",
1880 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1884 .result_unpriv = REJECT,
1885 .errstr_unpriv = "R0 leaks addr",
1888 "unpriv: add const to pointer",
1890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1891 BPF_MOV64_IMM(BPF_REG_0, 0),
1897 "unpriv: add pointer to pointer",
1899 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1900 BPF_MOV64_IMM(BPF_REG_0, 0),
1904 .result_unpriv = REJECT,
1905 .errstr_unpriv = "R1 pointer += pointer",
1908 "unpriv: neg pointer",
1910 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1911 BPF_MOV64_IMM(BPF_REG_0, 0),
1915 .result_unpriv = REJECT,
1916 .errstr_unpriv = "R1 pointer arithmetic",
1919 "unpriv: cmp pointer with const",
1921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1922 BPF_MOV64_IMM(BPF_REG_0, 0),
1926 .result_unpriv = REJECT,
1927 .errstr_unpriv = "R1 pointer comparison",
1930 "unpriv: cmp pointer with pointer",
1932 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1933 BPF_MOV64_IMM(BPF_REG_0, 0),
1937 .result_unpriv = REJECT,
1938 .errstr_unpriv = "R10 pointer comparison",
1941 "unpriv: check that printk is disallowed",
1943 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1946 BPF_MOV64_IMM(BPF_REG_2, 8),
1947 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1948 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1949 BPF_FUNC_trace_printk),
1950 BPF_MOV64_IMM(BPF_REG_0, 0),
1953 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1954 .result_unpriv = REJECT,
1958 "unpriv: pass pointer to helper function",
1960 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1963 BPF_LD_MAP_FD(BPF_REG_1, 0),
1964 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1965 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1967 BPF_FUNC_map_update_elem),
1968 BPF_MOV64_IMM(BPF_REG_0, 0),
1971 .fixup_map1 = { 3 },
1972 .errstr_unpriv = "R4 leaks addr",
1973 .result_unpriv = REJECT,
1977 "unpriv: indirectly pass pointer on stack to helper function",
1979 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1980 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1982 BPF_LD_MAP_FD(BPF_REG_1, 0),
1983 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1984 BPF_FUNC_map_lookup_elem),
1985 BPF_MOV64_IMM(BPF_REG_0, 0),
1988 .fixup_map1 = { 3 },
1989 .errstr = "invalid indirect read from stack off -8+0 size 8",
1993 "unpriv: mangle pointer on stack 1",
1995 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1996 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1997 BPF_MOV64_IMM(BPF_REG_0, 0),
2000 .errstr_unpriv = "attempt to corrupt spilled",
2001 .result_unpriv = REJECT,
2005 "unpriv: mangle pointer on stack 2",
2007 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2008 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2009 BPF_MOV64_IMM(BPF_REG_0, 0),
2012 .errstr_unpriv = "attempt to corrupt spilled",
2013 .result_unpriv = REJECT,
2017 "unpriv: read pointer from stack in small chunks",
2019 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2020 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2021 BPF_MOV64_IMM(BPF_REG_0, 0),
2024 .errstr = "invalid size",
2028 "unpriv: write pointer into ctx",
2030 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2031 BPF_MOV64_IMM(BPF_REG_0, 0),
2034 .errstr_unpriv = "R1 leaks addr",
2035 .result_unpriv = REJECT,
2036 .errstr = "invalid bpf_context access",
2040 "unpriv: spill/fill of ctx",
2042 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2043 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2044 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2045 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2046 BPF_MOV64_IMM(BPF_REG_0, 0),
2052 "unpriv: spill/fill of ctx 2",
2054 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2056 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2057 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2058 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2059 BPF_FUNC_get_hash_recalc),
2063 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2066 "unpriv: spill/fill of ctx 3",
2068 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2070 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2071 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2072 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2073 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2074 BPF_FUNC_get_hash_recalc),
2078 .errstr = "R1 type=fp expected=ctx",
2079 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2082 "unpriv: spill/fill of ctx 4",
2084 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2086 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2087 BPF_MOV64_IMM(BPF_REG_0, 1),
2088 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2090 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2092 BPF_FUNC_get_hash_recalc),
2096 .errstr = "R1 type=inv expected=ctx",
2097 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2100 "unpriv: spill/fill of different pointers stx",
2102 BPF_MOV64_IMM(BPF_REG_3, 42),
2103 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2106 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2108 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2109 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2110 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2111 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2112 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2113 offsetof(struct __sk_buff, mark)),
2114 BPF_MOV64_IMM(BPF_REG_0, 0),
2118 .errstr = "same insn cannot be used with different pointers",
2119 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2122 "unpriv: spill/fill of different pointers ldx",
2124 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2126 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2127 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2129 -(__s32)offsetof(struct bpf_perf_event_data,
2130 sample_period) - 8),
2131 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2132 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2133 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2134 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2135 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2136 offsetof(struct bpf_perf_event_data,
2138 BPF_MOV64_IMM(BPF_REG_0, 0),
2142 .errstr = "same insn cannot be used with different pointers",
2143 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2146 "unpriv: write pointer into map elem value",
2148 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2149 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2151 BPF_LD_MAP_FD(BPF_REG_1, 0),
2152 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2153 BPF_FUNC_map_lookup_elem),
2154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2155 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2158 .fixup_map1 = { 3 },
2159 .errstr_unpriv = "R0 leaks addr",
2160 .result_unpriv = REJECT,
2164 "unpriv: partial copy of pointer",
2166 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2167 BPF_MOV64_IMM(BPF_REG_0, 0),
2170 .errstr_unpriv = "R10 partial copy",
2171 .result_unpriv = REJECT,
2175 "unpriv: pass pointer to tail_call",
2177 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2178 BPF_LD_MAP_FD(BPF_REG_2, 0),
2179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2180 BPF_FUNC_tail_call),
2181 BPF_MOV64_IMM(BPF_REG_0, 0),
2184 .fixup_prog = { 1 },
2185 .errstr_unpriv = "R3 leaks addr into helper",
2186 .result_unpriv = REJECT,
2190 "unpriv: cmp map pointer with zero",
2192 BPF_MOV64_IMM(BPF_REG_1, 0),
2193 BPF_LD_MAP_FD(BPF_REG_1, 0),
2194 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2195 BPF_MOV64_IMM(BPF_REG_0, 0),
2198 .fixup_map1 = { 1 },
2199 .errstr_unpriv = "R1 pointer comparison",
2200 .result_unpriv = REJECT,
2204 "unpriv: write into frame pointer",
2206 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2207 BPF_MOV64_IMM(BPF_REG_0, 0),
2210 .errstr = "frame pointer is read only",
2214 "unpriv: spill/fill frame pointer",
2216 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2218 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2219 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2220 BPF_MOV64_IMM(BPF_REG_0, 0),
2223 .errstr = "frame pointer is read only",
2227 "unpriv: cmp of frame pointer",
2229 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2230 BPF_MOV64_IMM(BPF_REG_0, 0),
2233 .errstr_unpriv = "R10 pointer comparison",
2234 .result_unpriv = REJECT,
2238 "unpriv: adding of fp",
2240 BPF_MOV64_IMM(BPF_REG_0, 0),
2241 BPF_MOV64_IMM(BPF_REG_1, 0),
2242 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2243 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2247 .result_unpriv = REJECT,
2248 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2251 "unpriv: cmp of stack pointer",
2253 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2256 BPF_MOV64_IMM(BPF_REG_0, 0),
2259 .errstr_unpriv = "R2 pointer comparison",
2260 .result_unpriv = REJECT,
2264 "runtime/jit: pass negative index to tail_call",
2266 BPF_MOV64_IMM(BPF_REG_3, -1),
2267 BPF_LD_MAP_FD(BPF_REG_2, 0),
2268 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2269 BPF_FUNC_tail_call),
2270 BPF_MOV64_IMM(BPF_REG_0, 0),
2273 .fixup_prog = { 1 },
2277 "runtime/jit: pass > 32bit index to tail_call",
2279 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2280 BPF_LD_MAP_FD(BPF_REG_2, 0),
2281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2282 BPF_FUNC_tail_call),
2283 BPF_MOV64_IMM(BPF_REG_0, 0),
2286 .fixup_prog = { 2 },
2290 "stack pointer arithmetic",
2292 BPF_MOV64_IMM(BPF_REG_1, 4),
2293 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2294 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2297 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2298 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2299 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2300 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2302 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2303 BPF_MOV64_IMM(BPF_REG_0, 0),
2309 "raw_stack: no skb_load_bytes",
2311 BPF_MOV64_IMM(BPF_REG_2, 4),
2312 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2314 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2315 BPF_MOV64_IMM(BPF_REG_4, 8),
2316 /* Call to skb_load_bytes() omitted. */
2317 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2321 .errstr = "invalid read from stack off -8+0 size 8",
2322 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2325 "raw_stack: skb_load_bytes, negative len",
2327 BPF_MOV64_IMM(BPF_REG_2, 4),
2328 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2330 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2331 BPF_MOV64_IMM(BPF_REG_4, -8),
2332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2333 BPF_FUNC_skb_load_bytes),
2334 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2338 .errstr = "R4 min value is negative",
2339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2342 "raw_stack: skb_load_bytes, negative len 2",
2344 BPF_MOV64_IMM(BPF_REG_2, 4),
2345 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2347 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2348 BPF_MOV64_IMM(BPF_REG_4, ~0),
2349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2350 BPF_FUNC_skb_load_bytes),
2351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2355 .errstr = "R4 min value is negative",
2356 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2359 "raw_stack: skb_load_bytes, zero len",
2361 BPF_MOV64_IMM(BPF_REG_2, 4),
2362 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2364 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2365 BPF_MOV64_IMM(BPF_REG_4, 0),
2366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2367 BPF_FUNC_skb_load_bytes),
2368 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2372 .errstr = "invalid stack type R3",
2373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2376 "raw_stack: skb_load_bytes, no init",
2378 BPF_MOV64_IMM(BPF_REG_2, 4),
2379 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2381 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2382 BPF_MOV64_IMM(BPF_REG_4, 8),
2383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2384 BPF_FUNC_skb_load_bytes),
2385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2389 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2392 "raw_stack: skb_load_bytes, init",
2394 BPF_MOV64_IMM(BPF_REG_2, 4),
2395 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2397 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2398 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2399 BPF_MOV64_IMM(BPF_REG_4, 8),
2400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2401 BPF_FUNC_skb_load_bytes),
2402 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2406 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2409 "raw_stack: skb_load_bytes, spilled regs around bounds",
2411 BPF_MOV64_IMM(BPF_REG_2, 4),
2412 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2414 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2415 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2416 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2417 BPF_MOV64_IMM(BPF_REG_4, 8),
2418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2419 BPF_FUNC_skb_load_bytes),
2420 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2421 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2422 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2423 offsetof(struct __sk_buff, mark)),
2424 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2425 offsetof(struct __sk_buff, priority)),
2426 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2433 "raw_stack: skb_load_bytes, spilled regs corruption",
2435 BPF_MOV64_IMM(BPF_REG_2, 4),
2436 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2439 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2440 BPF_MOV64_IMM(BPF_REG_4, 8),
2441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2442 BPF_FUNC_skb_load_bytes),
2443 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2445 offsetof(struct __sk_buff, mark)),
2449 .errstr = "R0 invalid mem access 'inv'",
2450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2453 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2455 BPF_MOV64_IMM(BPF_REG_2, 4),
2456 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2458 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2459 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2460 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2461 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2462 BPF_MOV64_IMM(BPF_REG_4, 8),
2463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2464 BPF_FUNC_skb_load_bytes),
2465 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2466 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2467 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2468 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2469 offsetof(struct __sk_buff, mark)),
2470 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2471 offsetof(struct __sk_buff, priority)),
2472 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2473 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2474 offsetof(struct __sk_buff, pkt_type)),
2475 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2479 .errstr = "R3 invalid mem access 'inv'",
2480 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2483 "raw_stack: skb_load_bytes, spilled regs + data",
2485 BPF_MOV64_IMM(BPF_REG_2, 4),
2486 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2488 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2489 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2490 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2491 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2492 BPF_MOV64_IMM(BPF_REG_4, 8),
2493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2494 BPF_FUNC_skb_load_bytes),
2495 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2496 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2497 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2498 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2499 offsetof(struct __sk_buff, mark)),
2500 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2501 offsetof(struct __sk_buff, priority)),
2502 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2503 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2507 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2510 "raw_stack: skb_load_bytes, invalid access 1",
2512 BPF_MOV64_IMM(BPF_REG_2, 4),
2513 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2515 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2516 BPF_MOV64_IMM(BPF_REG_4, 8),
2517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2518 BPF_FUNC_skb_load_bytes),
2519 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2523 .errstr = "invalid stack type R3 off=-513 access_size=8",
2524 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2527 "raw_stack: skb_load_bytes, invalid access 2",
2529 BPF_MOV64_IMM(BPF_REG_2, 4),
2530 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2532 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2533 BPF_MOV64_IMM(BPF_REG_4, 8),
2534 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2535 BPF_FUNC_skb_load_bytes),
2536 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2540 .errstr = "invalid stack type R3 off=-1 access_size=8",
2541 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2544 "raw_stack: skb_load_bytes, invalid access 3",
2546 BPF_MOV64_IMM(BPF_REG_2, 4),
2547 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2549 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2550 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2551 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2552 BPF_FUNC_skb_load_bytes),
2553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2557 .errstr = "R4 min value is negative",
2558 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2561 "raw_stack: skb_load_bytes, invalid access 4",
2563 BPF_MOV64_IMM(BPF_REG_2, 4),
2564 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2566 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2567 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2569 BPF_FUNC_skb_load_bytes),
2570 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2574 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2575 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2578 "raw_stack: skb_load_bytes, invalid access 5",
2580 BPF_MOV64_IMM(BPF_REG_2, 4),
2581 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2583 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2584 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2586 BPF_FUNC_skb_load_bytes),
2587 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2591 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2592 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2595 "raw_stack: skb_load_bytes, invalid access 6",
2597 BPF_MOV64_IMM(BPF_REG_2, 4),
2598 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2600 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2601 BPF_MOV64_IMM(BPF_REG_4, 0),
2602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2603 BPF_FUNC_skb_load_bytes),
2604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2608 .errstr = "invalid stack type R3 off=-512 access_size=0",
2609 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2612 "raw_stack: skb_load_bytes, large access",
2614 BPF_MOV64_IMM(BPF_REG_2, 4),
2615 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2617 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2618 BPF_MOV64_IMM(BPF_REG_4, 512),
2619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2620 BPF_FUNC_skb_load_bytes),
2621 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2625 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2628 "context stores via ST",
2630 BPF_MOV64_IMM(BPF_REG_0, 0),
2631 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2634 .errstr = "BPF_ST stores into R1 context is not allowed",
2636 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2639 "context stores via XADD",
2641 BPF_MOV64_IMM(BPF_REG_0, 0),
2642 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2643 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2646 .errstr = "BPF_XADD stores into R1 context is not allowed",
2648 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2651 "direct packet access: test1",
2653 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2654 offsetof(struct __sk_buff, data)),
2655 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2656 offsetof(struct __sk_buff, data_end)),
2657 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2659 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2660 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2661 BPF_MOV64_IMM(BPF_REG_0, 0),
2665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2668 "direct packet access: test2",
2670 BPF_MOV64_IMM(BPF_REG_0, 1),
2671 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2672 offsetof(struct __sk_buff, data_end)),
2673 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2674 offsetof(struct __sk_buff, data)),
2675 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2677 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2678 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2679 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2680 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2682 offsetof(struct __sk_buff, data)),
2683 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2685 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2686 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2687 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2690 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2691 offsetof(struct __sk_buff, data_end)),
2692 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2693 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2694 BPF_MOV64_IMM(BPF_REG_0, 0),
2698 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2701 "direct packet access: test3",
2703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2704 offsetof(struct __sk_buff, data)),
2705 BPF_MOV64_IMM(BPF_REG_0, 0),
2708 .errstr = "invalid bpf_context access off=76",
2710 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2713 "direct packet access: test4 (write)",
2715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2716 offsetof(struct __sk_buff, data)),
2717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2718 offsetof(struct __sk_buff, data_end)),
2719 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2721 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2722 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2723 BPF_MOV64_IMM(BPF_REG_0, 0),
2727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2730 "direct packet access: test5 (pkt_end >= reg, good access)",
2732 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2733 offsetof(struct __sk_buff, data)),
2734 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2735 offsetof(struct __sk_buff, data_end)),
2736 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2738 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2739 BPF_MOV64_IMM(BPF_REG_0, 1),
2741 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2742 BPF_MOV64_IMM(BPF_REG_0, 0),
2746 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2749 "direct packet access: test6 (pkt_end >= reg, bad access)",
2751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2752 offsetof(struct __sk_buff, data)),
2753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2754 offsetof(struct __sk_buff, data_end)),
2755 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2757 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2758 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2759 BPF_MOV64_IMM(BPF_REG_0, 1),
2761 BPF_MOV64_IMM(BPF_REG_0, 0),
2764 .errstr = "invalid access to packet",
2766 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2769 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2771 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2772 offsetof(struct __sk_buff, data)),
2773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2774 offsetof(struct __sk_buff, data_end)),
2775 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2777 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2778 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2779 BPF_MOV64_IMM(BPF_REG_0, 1),
2781 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2782 BPF_MOV64_IMM(BPF_REG_0, 0),
2785 .errstr = "invalid access to packet",
2787 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2790 "direct packet access: test8 (double test, variant 1)",
2792 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2793 offsetof(struct __sk_buff, data)),
2794 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2795 offsetof(struct __sk_buff, data_end)),
2796 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2798 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2800 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2801 BPF_MOV64_IMM(BPF_REG_0, 1),
2803 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2804 BPF_MOV64_IMM(BPF_REG_0, 0),
2808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2811 "direct packet access: test9 (double test, variant 2)",
2813 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2814 offsetof(struct __sk_buff, data)),
2815 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2816 offsetof(struct __sk_buff, data_end)),
2817 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2819 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2820 BPF_MOV64_IMM(BPF_REG_0, 1),
2822 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2823 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2824 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2825 BPF_MOV64_IMM(BPF_REG_0, 0),
2829 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2832 "direct packet access: test10 (write invalid)",
2834 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2835 offsetof(struct __sk_buff, data)),
2836 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2837 offsetof(struct __sk_buff, data_end)),
2838 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2839 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2840 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2841 BPF_MOV64_IMM(BPF_REG_0, 0),
2843 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2844 BPF_MOV64_IMM(BPF_REG_0, 0),
2847 .errstr = "invalid access to packet",
2849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2852 "direct packet access: test11 (shift, good access)",
2854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2855 offsetof(struct __sk_buff, data)),
2856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2857 offsetof(struct __sk_buff, data_end)),
2858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2860 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2861 BPF_MOV64_IMM(BPF_REG_3, 144),
2862 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2864 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2865 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2866 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2867 BPF_MOV64_IMM(BPF_REG_0, 1),
2869 BPF_MOV64_IMM(BPF_REG_0, 0),
2873 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2876 "direct packet access: test12 (and, good access)",
2878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2879 offsetof(struct __sk_buff, data)),
2880 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2881 offsetof(struct __sk_buff, data_end)),
2882 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2884 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2885 BPF_MOV64_IMM(BPF_REG_3, 144),
2886 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2888 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2889 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2890 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2891 BPF_MOV64_IMM(BPF_REG_0, 1),
2893 BPF_MOV64_IMM(BPF_REG_0, 0),
2897 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2900 "direct packet access: test13 (branches, good access)",
2902 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2903 offsetof(struct __sk_buff, data)),
2904 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2905 offsetof(struct __sk_buff, data_end)),
2906 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2908 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2909 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2910 offsetof(struct __sk_buff, mark)),
2911 BPF_MOV64_IMM(BPF_REG_4, 1),
2912 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2913 BPF_MOV64_IMM(BPF_REG_3, 14),
2914 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2915 BPF_MOV64_IMM(BPF_REG_3, 24),
2916 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2918 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2919 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2920 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2921 BPF_MOV64_IMM(BPF_REG_0, 1),
2923 BPF_MOV64_IMM(BPF_REG_0, 0),
2927 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2930 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2932 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2933 offsetof(struct __sk_buff, data)),
2934 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2935 offsetof(struct __sk_buff, data_end)),
2936 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2938 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2939 BPF_MOV64_IMM(BPF_REG_5, 12),
2940 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2941 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2942 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2943 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2944 BPF_MOV64_IMM(BPF_REG_0, 1),
2946 BPF_MOV64_IMM(BPF_REG_0, 0),
2950 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2953 "direct packet access: test15 (spill with xadd)",
2955 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2956 offsetof(struct __sk_buff, data)),
2957 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2958 offsetof(struct __sk_buff, data_end)),
2959 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2961 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2962 BPF_MOV64_IMM(BPF_REG_5, 4096),
2963 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2965 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2966 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2967 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2968 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2969 BPF_MOV64_IMM(BPF_REG_0, 0),
2972 .errstr = "R2 invalid mem access 'inv'",
2974 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2977 "direct packet access: test16 (arith on data_end)",
2979 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2980 offsetof(struct __sk_buff, data)),
2981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2982 offsetof(struct __sk_buff, data_end)),
2983 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2986 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2987 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2988 BPF_MOV64_IMM(BPF_REG_0, 0),
2991 .errstr = "invalid access to packet",
2993 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2996 "direct packet access: test17 (pruning, alignment)",
2998 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2999 offsetof(struct __sk_buff, data)),
3000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3001 offsetof(struct __sk_buff, data_end)),
3002 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3003 offsetof(struct __sk_buff, mark)),
3004 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3006 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3007 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3008 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3009 BPF_MOV64_IMM(BPF_REG_0, 0),
3011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3014 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3016 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3017 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3020 "direct packet access: test18 (imm += pkt_ptr, 1)",
3022 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3023 offsetof(struct __sk_buff, data)),
3024 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3025 offsetof(struct __sk_buff, data_end)),
3026 BPF_MOV64_IMM(BPF_REG_0, 8),
3027 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3028 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3029 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3030 BPF_MOV64_IMM(BPF_REG_0, 0),
3034 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3037 "direct packet access: test19 (imm += pkt_ptr, 2)",
3039 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3040 offsetof(struct __sk_buff, data)),
3041 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3042 offsetof(struct __sk_buff, data_end)),
3043 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3045 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3046 BPF_MOV64_IMM(BPF_REG_4, 4),
3047 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3048 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3049 BPF_MOV64_IMM(BPF_REG_0, 0),
3053 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3056 "direct packet access: test20 (x += pkt_ptr, 1)",
3058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3059 offsetof(struct __sk_buff, data)),
3060 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3061 offsetof(struct __sk_buff, data_end)),
3062 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3063 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3064 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3065 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3066 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3067 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3068 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3070 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3071 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3072 BPF_MOV64_IMM(BPF_REG_0, 0),
3075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3079 "direct packet access: test21 (x += pkt_ptr, 2)",
3081 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3082 offsetof(struct __sk_buff, data)),
3083 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3084 offsetof(struct __sk_buff, data_end)),
3085 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3087 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3088 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3089 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3090 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3091 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3092 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3093 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3095 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3096 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3097 BPF_MOV64_IMM(BPF_REG_0, 0),
3100 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3104 "direct packet access: test22 (x += pkt_ptr, 3)",
3106 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3107 offsetof(struct __sk_buff, data)),
3108 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3109 offsetof(struct __sk_buff, data_end)),
3110 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3112 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3113 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3114 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3115 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3116 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3117 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3118 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3119 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3120 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3121 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3122 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3124 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3125 BPF_MOV64_IMM(BPF_REG_2, 1),
3126 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3127 BPF_MOV64_IMM(BPF_REG_0, 0),
3130 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3134 "direct packet access: test23 (x += pkt_ptr, 4)",
3136 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3137 offsetof(struct __sk_buff, data)),
3138 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3139 offsetof(struct __sk_buff, data_end)),
3140 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3141 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3142 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3143 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3144 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3145 BPF_MOV64_IMM(BPF_REG_0, 31),
3146 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3147 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3148 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3150 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3151 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3152 BPF_MOV64_IMM(BPF_REG_0, 0),
3155 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3157 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3160 "direct packet access: test24 (x += pkt_ptr, 5)",
3162 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3163 offsetof(struct __sk_buff, data)),
3164 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3165 offsetof(struct __sk_buff, data_end)),
3166 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3167 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3168 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3169 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3170 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3171 BPF_MOV64_IMM(BPF_REG_0, 64),
3172 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3173 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3174 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3176 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3177 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3178 BPF_MOV64_IMM(BPF_REG_0, 0),
3181 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3185 "direct packet access: test25 (marking on <, good access)",
3187 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3188 offsetof(struct __sk_buff, data)),
3189 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3190 offsetof(struct __sk_buff, data_end)),
3191 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3193 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3194 BPF_MOV64_IMM(BPF_REG_0, 0),
3196 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3197 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3200 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3203 "direct packet access: test26 (marking on <, bad access)",
3205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3206 offsetof(struct __sk_buff, data)),
3207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3208 offsetof(struct __sk_buff, data_end)),
3209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3211 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3212 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3213 BPF_MOV64_IMM(BPF_REG_0, 0),
3215 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3218 .errstr = "invalid access to packet",
3219 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3222 "direct packet access: test27 (marking on <=, good access)",
3224 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3225 offsetof(struct __sk_buff, data)),
3226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3227 offsetof(struct __sk_buff, data_end)),
3228 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3230 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3231 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3232 BPF_MOV64_IMM(BPF_REG_0, 1),
3236 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3239 "direct packet access: test28 (marking on <=, bad access)",
3241 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3242 offsetof(struct __sk_buff, data)),
3243 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3244 offsetof(struct __sk_buff, data_end)),
3245 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3247 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3248 BPF_MOV64_IMM(BPF_REG_0, 1),
3250 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3251 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3254 .errstr = "invalid access to packet",
3255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3258 "helper access to packet: test1, valid packet_ptr range",
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct xdp_md, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct xdp_md, data_end)),
3264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3266 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3267 BPF_LD_MAP_FD(BPF_REG_1, 0),
3268 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3269 BPF_MOV64_IMM(BPF_REG_4, 0),
3270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3271 BPF_FUNC_map_update_elem),
3272 BPF_MOV64_IMM(BPF_REG_0, 0),
3275 .fixup_map1 = { 5 },
3276 .result_unpriv = ACCEPT,
3278 .prog_type = BPF_PROG_TYPE_XDP,
3281 "helper access to packet: test2, unchecked packet_ptr",
3283 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3284 offsetof(struct xdp_md, data)),
3285 BPF_LD_MAP_FD(BPF_REG_1, 0),
3286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3287 BPF_FUNC_map_lookup_elem),
3288 BPF_MOV64_IMM(BPF_REG_0, 0),
3291 .fixup_map1 = { 1 },
3293 .errstr = "invalid access to packet",
3294 .prog_type = BPF_PROG_TYPE_XDP,
3297 "helper access to packet: test3, variable add",
3299 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3300 offsetof(struct xdp_md, data)),
3301 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3302 offsetof(struct xdp_md, data_end)),
3303 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3305 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3306 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3307 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3308 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3309 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3311 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3312 BPF_LD_MAP_FD(BPF_REG_1, 0),
3313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3315 BPF_FUNC_map_lookup_elem),
3316 BPF_MOV64_IMM(BPF_REG_0, 0),
3319 .fixup_map1 = { 11 },
3321 .prog_type = BPF_PROG_TYPE_XDP,
3324 "helper access to packet: test4, packet_ptr with bad range",
3326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3327 offsetof(struct xdp_md, data)),
3328 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3329 offsetof(struct xdp_md, data_end)),
3330 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3332 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3333 BPF_MOV64_IMM(BPF_REG_0, 0),
3335 BPF_LD_MAP_FD(BPF_REG_1, 0),
3336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3337 BPF_FUNC_map_lookup_elem),
3338 BPF_MOV64_IMM(BPF_REG_0, 0),
3341 .fixup_map1 = { 7 },
3343 .errstr = "invalid access to packet",
3344 .prog_type = BPF_PROG_TYPE_XDP,
3347 "helper access to packet: test5, packet_ptr with too short range",
3349 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3350 offsetof(struct xdp_md, data)),
3351 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3352 offsetof(struct xdp_md, data_end)),
3353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3354 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3356 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3357 BPF_LD_MAP_FD(BPF_REG_1, 0),
3358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3359 BPF_FUNC_map_lookup_elem),
3360 BPF_MOV64_IMM(BPF_REG_0, 0),
3363 .fixup_map1 = { 6 },
3365 .errstr = "invalid access to packet",
3366 .prog_type = BPF_PROG_TYPE_XDP,
3369 "helper access to packet: test6, cls valid packet_ptr range",
3371 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3372 offsetof(struct __sk_buff, data)),
3373 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3374 offsetof(struct __sk_buff, data_end)),
3375 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3377 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3378 BPF_LD_MAP_FD(BPF_REG_1, 0),
3379 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3380 BPF_MOV64_IMM(BPF_REG_4, 0),
3381 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3382 BPF_FUNC_map_update_elem),
3383 BPF_MOV64_IMM(BPF_REG_0, 0),
3386 .fixup_map1 = { 5 },
3388 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3391 "helper access to packet: test7, cls unchecked packet_ptr",
3393 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3394 offsetof(struct __sk_buff, data)),
3395 BPF_LD_MAP_FD(BPF_REG_1, 0),
3396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3397 BPF_FUNC_map_lookup_elem),
3398 BPF_MOV64_IMM(BPF_REG_0, 0),
3401 .fixup_map1 = { 1 },
3403 .errstr = "invalid access to packet",
3404 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3407 "helper access to packet: test8, cls variable add",
3409 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3410 offsetof(struct __sk_buff, data)),
3411 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3412 offsetof(struct __sk_buff, data_end)),
3413 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3415 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3416 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3417 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3418 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3419 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3421 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3422 BPF_LD_MAP_FD(BPF_REG_1, 0),
3423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3425 BPF_FUNC_map_lookup_elem),
3426 BPF_MOV64_IMM(BPF_REG_0, 0),
3429 .fixup_map1 = { 11 },
3431 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3434 "helper access to packet: test9, cls packet_ptr with bad range",
3436 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3437 offsetof(struct __sk_buff, data)),
3438 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3439 offsetof(struct __sk_buff, data_end)),
3440 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3442 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3443 BPF_MOV64_IMM(BPF_REG_0, 0),
3445 BPF_LD_MAP_FD(BPF_REG_1, 0),
3446 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3447 BPF_FUNC_map_lookup_elem),
3448 BPF_MOV64_IMM(BPF_REG_0, 0),
3451 .fixup_map1 = { 7 },
3453 .errstr = "invalid access to packet",
3454 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3457 "helper access to packet: test10, cls packet_ptr with too short range",
3459 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3460 offsetof(struct __sk_buff, data)),
3461 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3462 offsetof(struct __sk_buff, data_end)),
3463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3464 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3466 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3467 BPF_LD_MAP_FD(BPF_REG_1, 0),
3468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3469 BPF_FUNC_map_lookup_elem),
3470 BPF_MOV64_IMM(BPF_REG_0, 0),
3473 .fixup_map1 = { 6 },
3475 .errstr = "invalid access to packet",
3476 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3479 "helper access to packet: test11, cls unsuitable helper 1",
3481 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3482 offsetof(struct __sk_buff, data)),
3483 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3484 offsetof(struct __sk_buff, data_end)),
3485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3486 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3488 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3489 BPF_MOV64_IMM(BPF_REG_2, 0),
3490 BPF_MOV64_IMM(BPF_REG_4, 42),
3491 BPF_MOV64_IMM(BPF_REG_5, 0),
3492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3493 BPF_FUNC_skb_store_bytes),
3494 BPF_MOV64_IMM(BPF_REG_0, 0),
3498 .errstr = "helper access to the packet",
3499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3502 "helper access to packet: test12, cls unsuitable helper 2",
3504 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3505 offsetof(struct __sk_buff, data)),
3506 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3507 offsetof(struct __sk_buff, data_end)),
3508 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3510 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3511 BPF_MOV64_IMM(BPF_REG_2, 0),
3512 BPF_MOV64_IMM(BPF_REG_4, 4),
3513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3514 BPF_FUNC_skb_load_bytes),
3515 BPF_MOV64_IMM(BPF_REG_0, 0),
3519 .errstr = "helper access to the packet",
3520 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3523 "helper access to packet: test13, cls helper ok",
3525 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3526 offsetof(struct __sk_buff, data)),
3527 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3528 offsetof(struct __sk_buff, data_end)),
3529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3532 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3534 BPF_MOV64_IMM(BPF_REG_2, 4),
3535 BPF_MOV64_IMM(BPF_REG_3, 0),
3536 BPF_MOV64_IMM(BPF_REG_4, 0),
3537 BPF_MOV64_IMM(BPF_REG_5, 0),
3538 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3539 BPF_FUNC_csum_diff),
3540 BPF_MOV64_IMM(BPF_REG_0, 0),
3544 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3547 "helper access to packet: test14, cls helper ok sub",
3549 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3550 offsetof(struct __sk_buff, data)),
3551 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3552 offsetof(struct __sk_buff, data_end)),
3553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3554 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3556 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3557 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3558 BPF_MOV64_IMM(BPF_REG_2, 4),
3559 BPF_MOV64_IMM(BPF_REG_3, 0),
3560 BPF_MOV64_IMM(BPF_REG_4, 0),
3561 BPF_MOV64_IMM(BPF_REG_5, 0),
3562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3563 BPF_FUNC_csum_diff),
3564 BPF_MOV64_IMM(BPF_REG_0, 0),
3568 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3571 "helper access to packet: test15, cls helper fail sub",
3573 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3574 offsetof(struct __sk_buff, data)),
3575 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3576 offsetof(struct __sk_buff, data_end)),
3577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3580 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3581 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3582 BPF_MOV64_IMM(BPF_REG_2, 4),
3583 BPF_MOV64_IMM(BPF_REG_3, 0),
3584 BPF_MOV64_IMM(BPF_REG_4, 0),
3585 BPF_MOV64_IMM(BPF_REG_5, 0),
3586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3587 BPF_FUNC_csum_diff),
3588 BPF_MOV64_IMM(BPF_REG_0, 0),
3592 .errstr = "invalid access to packet",
3593 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3596 "helper access to packet: test16, cls helper fail range 1",
3598 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3599 offsetof(struct __sk_buff, data)),
3600 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3601 offsetof(struct __sk_buff, data_end)),
3602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3605 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3607 BPF_MOV64_IMM(BPF_REG_2, 8),
3608 BPF_MOV64_IMM(BPF_REG_3, 0),
3609 BPF_MOV64_IMM(BPF_REG_4, 0),
3610 BPF_MOV64_IMM(BPF_REG_5, 0),
3611 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3612 BPF_FUNC_csum_diff),
3613 BPF_MOV64_IMM(BPF_REG_0, 0),
3617 .errstr = "invalid access to packet",
3618 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3621 "helper access to packet: test17, cls helper fail range 2",
3623 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3624 offsetof(struct __sk_buff, data)),
3625 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3626 offsetof(struct __sk_buff, data_end)),
3627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3628 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3630 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3631 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3632 BPF_MOV64_IMM(BPF_REG_2, -9),
3633 BPF_MOV64_IMM(BPF_REG_3, 0),
3634 BPF_MOV64_IMM(BPF_REG_4, 0),
3635 BPF_MOV64_IMM(BPF_REG_5, 0),
3636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3637 BPF_FUNC_csum_diff),
3638 BPF_MOV64_IMM(BPF_REG_0, 0),
3642 .errstr = "R2 min value is negative",
3643 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3646 "helper access to packet: test18, cls helper fail range 3",
3648 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3649 offsetof(struct __sk_buff, data)),
3650 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3651 offsetof(struct __sk_buff, data_end)),
3652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3653 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3655 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3656 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3657 BPF_MOV64_IMM(BPF_REG_2, ~0),
3658 BPF_MOV64_IMM(BPF_REG_3, 0),
3659 BPF_MOV64_IMM(BPF_REG_4, 0),
3660 BPF_MOV64_IMM(BPF_REG_5, 0),
3661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3662 BPF_FUNC_csum_diff),
3663 BPF_MOV64_IMM(BPF_REG_0, 0),
3667 .errstr = "R2 min value is negative",
3668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3671 "helper access to packet: test19, cls helper fail range zero",
3673 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3674 offsetof(struct __sk_buff, data)),
3675 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3676 offsetof(struct __sk_buff, data_end)),
3677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3678 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3680 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3682 BPF_MOV64_IMM(BPF_REG_2, 0),
3683 BPF_MOV64_IMM(BPF_REG_3, 0),
3684 BPF_MOV64_IMM(BPF_REG_4, 0),
3685 BPF_MOV64_IMM(BPF_REG_5, 0),
3686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3687 BPF_FUNC_csum_diff),
3688 BPF_MOV64_IMM(BPF_REG_0, 0),
3692 .errstr = "invalid access to packet",
3693 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3696 "helper access to packet: test20, pkt end as input",
3698 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3699 offsetof(struct __sk_buff, data)),
3700 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3701 offsetof(struct __sk_buff, data_end)),
3702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3705 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3707 BPF_MOV64_IMM(BPF_REG_2, 4),
3708 BPF_MOV64_IMM(BPF_REG_3, 0),
3709 BPF_MOV64_IMM(BPF_REG_4, 0),
3710 BPF_MOV64_IMM(BPF_REG_5, 0),
3711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3712 BPF_FUNC_csum_diff),
3713 BPF_MOV64_IMM(BPF_REG_0, 0),
3717 .errstr = "R1 type=pkt_end expected=fp",
3718 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3721 "helper access to packet: test21, wrong reg",
3723 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3724 offsetof(struct __sk_buff, data)),
3725 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3726 offsetof(struct __sk_buff, data_end)),
3727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3728 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3730 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3731 BPF_MOV64_IMM(BPF_REG_2, 4),
3732 BPF_MOV64_IMM(BPF_REG_3, 0),
3733 BPF_MOV64_IMM(BPF_REG_4, 0),
3734 BPF_MOV64_IMM(BPF_REG_5, 0),
3735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3736 BPF_FUNC_csum_diff),
3737 BPF_MOV64_IMM(BPF_REG_0, 0),
3741 .errstr = "invalid access to packet",
3742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3745 "valid map access into an array with a constant",
3747 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3748 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3750 BPF_LD_MAP_FD(BPF_REG_1, 0),
3751 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3752 BPF_FUNC_map_lookup_elem),
3753 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3754 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3755 offsetof(struct test_val, foo)),
3758 .fixup_map2 = { 3 },
3759 .errstr_unpriv = "R0 leaks addr",
3760 .result_unpriv = REJECT,
3764 "valid map access into an array with a register",
3766 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3767 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3769 BPF_LD_MAP_FD(BPF_REG_1, 0),
3770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3771 BPF_FUNC_map_lookup_elem),
3772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3773 BPF_MOV64_IMM(BPF_REG_1, 4),
3774 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3775 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3776 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3777 offsetof(struct test_val, foo)),
3780 .fixup_map2 = { 3 },
3781 .errstr_unpriv = "R0 leaks addr",
3782 .result_unpriv = REJECT,
3784 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3787 "valid map access into an array with a variable",
3789 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3792 BPF_LD_MAP_FD(BPF_REG_1, 0),
3793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3794 BPF_FUNC_map_lookup_elem),
3795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3796 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3797 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3798 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3799 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3800 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3801 offsetof(struct test_val, foo)),
3804 .fixup_map2 = { 3 },
3805 .errstr_unpriv = "R0 leaks addr",
3806 .result_unpriv = REJECT,
3808 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3811 "valid map access into an array with a signed variable",
3813 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3816 BPF_LD_MAP_FD(BPF_REG_1, 0),
3817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3818 BPF_FUNC_map_lookup_elem),
3819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3820 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3821 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3822 BPF_MOV32_IMM(BPF_REG_1, 0),
3823 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3824 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3825 BPF_MOV32_IMM(BPF_REG_1, 0),
3826 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3827 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3828 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3829 offsetof(struct test_val, foo)),
3832 .fixup_map2 = { 3 },
3833 .errstr_unpriv = "R0 leaks addr",
3834 .result_unpriv = REJECT,
3836 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3839 "invalid map access into an array with a constant",
3841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3844 BPF_LD_MAP_FD(BPF_REG_1, 0),
3845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3846 BPF_FUNC_map_lookup_elem),
3847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3848 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3849 offsetof(struct test_val, foo)),
3852 .fixup_map2 = { 3 },
3853 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3857 "invalid map access into an array with a register",
3859 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3860 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3862 BPF_LD_MAP_FD(BPF_REG_1, 0),
3863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3864 BPF_FUNC_map_lookup_elem),
3865 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3866 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3867 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3868 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3869 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3870 offsetof(struct test_val, foo)),
3873 .fixup_map2 = { 3 },
3874 .errstr = "R0 min value is outside of the array range",
3876 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3879 "invalid map access into an array with a variable",
3881 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3884 BPF_LD_MAP_FD(BPF_REG_1, 0),
3885 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3886 BPF_FUNC_map_lookup_elem),
3887 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3888 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3889 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3890 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3891 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3892 offsetof(struct test_val, foo)),
3895 .fixup_map2 = { 3 },
3896 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3898 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3901 "invalid map access into an array with no floor check",
3903 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3904 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3906 BPF_LD_MAP_FD(BPF_REG_1, 0),
3907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3908 BPF_FUNC_map_lookup_elem),
3909 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3910 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3911 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3912 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3913 BPF_MOV32_IMM(BPF_REG_1, 0),
3914 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3915 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3916 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3917 offsetof(struct test_val, foo)),
3920 .fixup_map2 = { 3 },
3921 .errstr_unpriv = "R0 leaks addr",
3922 .errstr = "R0 unbounded memory access",
3923 .result_unpriv = REJECT,
3925 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3928 "invalid map access into an array with a invalid max check",
3930 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3931 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3933 BPF_LD_MAP_FD(BPF_REG_1, 0),
3934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3935 BPF_FUNC_map_lookup_elem),
3936 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3937 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3938 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3939 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3940 BPF_MOV32_IMM(BPF_REG_1, 0),
3941 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3942 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3943 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3944 offsetof(struct test_val, foo)),
3947 .fixup_map2 = { 3 },
3948 .errstr_unpriv = "R0 leaks addr",
3949 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3950 .result_unpriv = REJECT,
3952 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3955 "invalid map access into an array with a invalid max check",
3957 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3958 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3960 BPF_LD_MAP_FD(BPF_REG_1, 0),
3961 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3962 BPF_FUNC_map_lookup_elem),
3963 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3964 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3965 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3968 BPF_LD_MAP_FD(BPF_REG_1, 0),
3969 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3970 BPF_FUNC_map_lookup_elem),
3971 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3972 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3973 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3974 offsetof(struct test_val, foo)),
3977 .fixup_map2 = { 3, 11 },
3978 .errstr_unpriv = "R0 pointer += pointer",
3979 .errstr = "R0 invalid mem access 'inv'",
3980 .result_unpriv = REJECT,
3982 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3985 "multiple registers share map_lookup_elem result",
3987 BPF_MOV64_IMM(BPF_REG_1, 10),
3988 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3989 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3991 BPF_LD_MAP_FD(BPF_REG_1, 0),
3992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3993 BPF_FUNC_map_lookup_elem),
3994 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3996 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3999 .fixup_map1 = { 4 },
4001 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4004 "alu ops on ptr_to_map_value_or_null, 1",
4006 BPF_MOV64_IMM(BPF_REG_1, 10),
4007 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4010 BPF_LD_MAP_FD(BPF_REG_1, 0),
4011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4012 BPF_FUNC_map_lookup_elem),
4013 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4016 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4017 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4020 .fixup_map1 = { 4 },
4021 .errstr = "R4 invalid mem access",
4023 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4026 "alu ops on ptr_to_map_value_or_null, 2",
4028 BPF_MOV64_IMM(BPF_REG_1, 10),
4029 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4032 BPF_LD_MAP_FD(BPF_REG_1, 0),
4033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4034 BPF_FUNC_map_lookup_elem),
4035 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4036 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4037 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4038 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4041 .fixup_map1 = { 4 },
4042 .errstr = "R4 invalid mem access",
4044 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4047 "alu ops on ptr_to_map_value_or_null, 3",
4049 BPF_MOV64_IMM(BPF_REG_1, 10),
4050 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4053 BPF_LD_MAP_FD(BPF_REG_1, 0),
4054 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4055 BPF_FUNC_map_lookup_elem),
4056 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4057 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4059 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4062 .fixup_map1 = { 4 },
4063 .errstr = "R4 invalid mem access",
4065 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4068 "invalid memory access with multiple map_lookup_elem calls",
4070 BPF_MOV64_IMM(BPF_REG_1, 10),
4071 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4074 BPF_LD_MAP_FD(BPF_REG_1, 0),
4075 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4076 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4078 BPF_FUNC_map_lookup_elem),
4079 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4080 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4083 BPF_FUNC_map_lookup_elem),
4084 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4085 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4088 .fixup_map1 = { 4 },
4090 .errstr = "R4 !read_ok",
4091 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4094 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4096 BPF_MOV64_IMM(BPF_REG_1, 10),
4097 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4098 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4100 BPF_LD_MAP_FD(BPF_REG_1, 0),
4101 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4102 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4104 BPF_FUNC_map_lookup_elem),
4105 BPF_MOV64_IMM(BPF_REG_2, 10),
4106 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4109 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4110 BPF_FUNC_map_lookup_elem),
4111 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4112 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4113 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4116 .fixup_map1 = { 4 },
4118 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4121 "invalid map access from else condition",
4123 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4126 BPF_LD_MAP_FD(BPF_REG_1, 0),
4127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4128 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4129 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4130 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4132 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4133 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4134 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4137 .fixup_map2 = { 3 },
4138 .errstr = "R0 unbounded memory access",
4140 .errstr_unpriv = "R0 leaks addr",
4141 .result_unpriv = REJECT,
4142 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4145 "constant register |= constant should keep constant type",
4147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4149 BPF_MOV64_IMM(BPF_REG_2, 34),
4150 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4151 BPF_MOV64_IMM(BPF_REG_3, 0),
4152 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4156 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4159 "constant register |= constant should not bypass stack boundary checks",
4161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4163 BPF_MOV64_IMM(BPF_REG_2, 34),
4164 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4165 BPF_MOV64_IMM(BPF_REG_3, 0),
4166 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4169 .errstr = "invalid stack type R1 off=-48 access_size=58",
4171 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4174 "constant register |= constant register should keep constant type",
4176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4178 BPF_MOV64_IMM(BPF_REG_2, 34),
4179 BPF_MOV64_IMM(BPF_REG_4, 13),
4180 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4181 BPF_MOV64_IMM(BPF_REG_3, 0),
4182 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4186 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4189 "constant register |= constant register should not bypass stack boundary checks",
4191 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4193 BPF_MOV64_IMM(BPF_REG_2, 34),
4194 BPF_MOV64_IMM(BPF_REG_4, 24),
4195 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4196 BPF_MOV64_IMM(BPF_REG_3, 0),
4197 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4200 .errstr = "invalid stack type R1 off=-48 access_size=58",
4202 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4205 "invalid direct packet write for LWT_IN",
4207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4208 offsetof(struct __sk_buff, data)),
4209 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4210 offsetof(struct __sk_buff, data_end)),
4211 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4213 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4214 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4215 BPF_MOV64_IMM(BPF_REG_0, 0),
4218 .errstr = "cannot write into packet",
4220 .prog_type = BPF_PROG_TYPE_LWT_IN,
4223 "invalid direct packet write for LWT_OUT",
4225 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4226 offsetof(struct __sk_buff, data)),
4227 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4228 offsetof(struct __sk_buff, data_end)),
4229 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4231 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4232 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4233 BPF_MOV64_IMM(BPF_REG_0, 0),
4236 .errstr = "cannot write into packet",
4238 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4241 "direct packet write for LWT_XMIT",
4243 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4244 offsetof(struct __sk_buff, data)),
4245 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4246 offsetof(struct __sk_buff, data_end)),
4247 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4249 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4250 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4251 BPF_MOV64_IMM(BPF_REG_0, 0),
4255 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4258 "direct packet read for LWT_IN",
4260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4261 offsetof(struct __sk_buff, data)),
4262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4263 offsetof(struct __sk_buff, data_end)),
4264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4266 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4267 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4268 BPF_MOV64_IMM(BPF_REG_0, 0),
4272 .prog_type = BPF_PROG_TYPE_LWT_IN,
4275 "direct packet read for LWT_OUT",
4277 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4278 offsetof(struct __sk_buff, data)),
4279 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4280 offsetof(struct __sk_buff, data_end)),
4281 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4283 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4284 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4285 BPF_MOV64_IMM(BPF_REG_0, 0),
4289 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4292 "direct packet read for LWT_XMIT",
4294 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4295 offsetof(struct __sk_buff, data)),
4296 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4297 offsetof(struct __sk_buff, data_end)),
4298 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4300 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4302 BPF_MOV64_IMM(BPF_REG_0, 0),
4306 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4309 "overlapping checks for direct packet access",
4311 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4312 offsetof(struct __sk_buff, data)),
4313 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4314 offsetof(struct __sk_buff, data_end)),
4315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4317 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4318 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4320 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4321 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4322 BPF_MOV64_IMM(BPF_REG_0, 0),
4326 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4329 "invalid access of tc_classid for LWT_IN",
4331 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4332 offsetof(struct __sk_buff, tc_classid)),
4336 .errstr = "invalid bpf_context access",
4339 "invalid access of tc_classid for LWT_OUT",
4341 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4342 offsetof(struct __sk_buff, tc_classid)),
4346 .errstr = "invalid bpf_context access",
4349 "invalid access of tc_classid for LWT_XMIT",
4351 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4352 offsetof(struct __sk_buff, tc_classid)),
4356 .errstr = "invalid bpf_context access",
4359 "leak pointer into ctx 1",
4361 BPF_MOV64_IMM(BPF_REG_0, 0),
4362 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4363 offsetof(struct __sk_buff, cb[0])),
4364 BPF_LD_MAP_FD(BPF_REG_2, 0),
4365 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4366 offsetof(struct __sk_buff, cb[0])),
4369 .fixup_map1 = { 2 },
4370 .errstr_unpriv = "R2 leaks addr into mem",
4371 .result_unpriv = REJECT,
4373 .errstr = "BPF_XADD stores into R1 context is not allowed",
4376 "leak pointer into ctx 2",
4378 BPF_MOV64_IMM(BPF_REG_0, 0),
4379 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4380 offsetof(struct __sk_buff, cb[0])),
4381 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4382 offsetof(struct __sk_buff, cb[0])),
4385 .errstr_unpriv = "R10 leaks addr into mem",
4386 .result_unpriv = REJECT,
4388 .errstr = "BPF_XADD stores into R1 context is not allowed",
4391 "leak pointer into ctx 3",
4393 BPF_MOV64_IMM(BPF_REG_0, 0),
4394 BPF_LD_MAP_FD(BPF_REG_2, 0),
4395 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4396 offsetof(struct __sk_buff, cb[0])),
4399 .fixup_map1 = { 1 },
4400 .errstr_unpriv = "R2 leaks addr into ctx",
4401 .result_unpriv = REJECT,
4405 "leak pointer into map val",
4407 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4408 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4411 BPF_LD_MAP_FD(BPF_REG_1, 0),
4412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 BPF_FUNC_map_lookup_elem),
4414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4415 BPF_MOV64_IMM(BPF_REG_3, 0),
4416 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4417 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4418 BPF_MOV64_IMM(BPF_REG_0, 0),
4421 .fixup_map1 = { 4 },
4422 .errstr_unpriv = "R6 leaks addr into mem",
4423 .result_unpriv = REJECT,
4427 "helper access to map: full range",
4429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4431 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4432 BPF_LD_MAP_FD(BPF_REG_1, 0),
4433 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4436 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4437 BPF_MOV64_IMM(BPF_REG_3, 0),
4438 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4441 .fixup_map2 = { 3 },
4443 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4446 "helper access to map: partial range",
4448 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4450 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4451 BPF_LD_MAP_FD(BPF_REG_1, 0),
4452 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4453 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4455 BPF_MOV64_IMM(BPF_REG_2, 8),
4456 BPF_MOV64_IMM(BPF_REG_3, 0),
4457 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4460 .fixup_map2 = { 3 },
4462 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4465 "helper access to map: empty range",
4467 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4468 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4469 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4470 BPF_LD_MAP_FD(BPF_REG_1, 0),
4471 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4472 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4474 BPF_MOV64_IMM(BPF_REG_2, 0),
4475 BPF_MOV64_IMM(BPF_REG_3, 0),
4476 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4479 .fixup_map2 = { 3 },
4480 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4482 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4485 "helper access to map: out-of-bound range",
4487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4489 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4490 BPF_LD_MAP_FD(BPF_REG_1, 0),
4491 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4492 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4494 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4495 BPF_MOV64_IMM(BPF_REG_3, 0),
4496 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4499 .fixup_map2 = { 3 },
4500 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4505 "helper access to map: negative range",
4507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4509 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4510 BPF_LD_MAP_FD(BPF_REG_1, 0),
4511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4514 BPF_MOV64_IMM(BPF_REG_2, -8),
4515 BPF_MOV64_IMM(BPF_REG_3, 0),
4516 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4519 .fixup_map2 = { 3 },
4520 .errstr = "R2 min value is negative",
4522 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4525 "helper access to adjusted map (via const imm): full range",
4527 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4529 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4530 BPF_LD_MAP_FD(BPF_REG_1, 0),
4531 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4535 offsetof(struct test_val, foo)),
4536 BPF_MOV64_IMM(BPF_REG_2,
4537 sizeof(struct test_val) -
4538 offsetof(struct test_val, foo)),
4539 BPF_MOV64_IMM(BPF_REG_3, 0),
4540 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4543 .fixup_map2 = { 3 },
4545 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4548 "helper access to adjusted map (via const imm): partial range",
4550 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4552 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4553 BPF_LD_MAP_FD(BPF_REG_1, 0),
4554 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4556 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4558 offsetof(struct test_val, foo)),
4559 BPF_MOV64_IMM(BPF_REG_2, 8),
4560 BPF_MOV64_IMM(BPF_REG_3, 0),
4561 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4564 .fixup_map2 = { 3 },
4566 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4569 "helper access to adjusted map (via const imm): empty range",
4571 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4573 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4574 BPF_LD_MAP_FD(BPF_REG_1, 0),
4575 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4576 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4577 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4579 offsetof(struct test_val, foo)),
4580 BPF_MOV64_IMM(BPF_REG_2, 0),
4581 BPF_MOV64_IMM(BPF_REG_3, 0),
4582 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4585 .fixup_map2 = { 3 },
4586 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4588 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4591 "helper access to adjusted map (via const imm): out-of-bound range",
4593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4595 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4596 BPF_LD_MAP_FD(BPF_REG_1, 0),
4597 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4599 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4601 offsetof(struct test_val, foo)),
4602 BPF_MOV64_IMM(BPF_REG_2,
4603 sizeof(struct test_val) -
4604 offsetof(struct test_val, foo) + 8),
4605 BPF_MOV64_IMM(BPF_REG_3, 0),
4606 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4609 .fixup_map2 = { 3 },
4610 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4615 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4619 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4620 BPF_LD_MAP_FD(BPF_REG_1, 0),
4621 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4625 offsetof(struct test_val, foo)),
4626 BPF_MOV64_IMM(BPF_REG_2, -8),
4627 BPF_MOV64_IMM(BPF_REG_3, 0),
4628 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4631 .fixup_map2 = { 3 },
4632 .errstr = "R2 min value is negative",
4634 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4637 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4641 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4642 BPF_LD_MAP_FD(BPF_REG_1, 0),
4643 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4647 offsetof(struct test_val, foo)),
4648 BPF_MOV64_IMM(BPF_REG_2, -1),
4649 BPF_MOV64_IMM(BPF_REG_3, 0),
4650 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4653 .fixup_map2 = { 3 },
4654 .errstr = "R2 min value is negative",
4656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4659 "helper access to adjusted map (via const reg): full range",
4661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4664 BPF_LD_MAP_FD(BPF_REG_1, 0),
4665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4668 BPF_MOV64_IMM(BPF_REG_3,
4669 offsetof(struct test_val, foo)),
4670 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4671 BPF_MOV64_IMM(BPF_REG_2,
4672 sizeof(struct test_val) -
4673 offsetof(struct test_val, foo)),
4674 BPF_MOV64_IMM(BPF_REG_3, 0),
4675 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4678 .fixup_map2 = { 3 },
4680 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4683 "helper access to adjusted map (via const reg): partial range",
4685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4687 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4688 BPF_LD_MAP_FD(BPF_REG_1, 0),
4689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4692 BPF_MOV64_IMM(BPF_REG_3,
4693 offsetof(struct test_val, foo)),
4694 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4695 BPF_MOV64_IMM(BPF_REG_2, 8),
4696 BPF_MOV64_IMM(BPF_REG_3, 0),
4697 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4700 .fixup_map2 = { 3 },
4702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4705 "helper access to adjusted map (via const reg): empty range",
4707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4709 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4710 BPF_LD_MAP_FD(BPF_REG_1, 0),
4711 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4714 BPF_MOV64_IMM(BPF_REG_3, 0),
4715 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4716 BPF_MOV64_IMM(BPF_REG_2, 0),
4717 BPF_MOV64_IMM(BPF_REG_3, 0),
4718 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4721 .fixup_map2 = { 3 },
4722 .errstr = "R1 min value is outside of the array range",
4724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4727 "helper access to adjusted map (via const reg): out-of-bound range",
4729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4732 BPF_LD_MAP_FD(BPF_REG_1, 0),
4733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4736 BPF_MOV64_IMM(BPF_REG_3,
4737 offsetof(struct test_val, foo)),
4738 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4739 BPF_MOV64_IMM(BPF_REG_2,
4740 sizeof(struct test_val) -
4741 offsetof(struct test_val, foo) + 8),
4742 BPF_MOV64_IMM(BPF_REG_3, 0),
4743 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4746 .fixup_map2 = { 3 },
4747 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4749 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4752 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4756 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4757 BPF_LD_MAP_FD(BPF_REG_1, 0),
4758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4761 BPF_MOV64_IMM(BPF_REG_3,
4762 offsetof(struct test_val, foo)),
4763 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4764 BPF_MOV64_IMM(BPF_REG_2, -8),
4765 BPF_MOV64_IMM(BPF_REG_3, 0),
4766 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4769 .fixup_map2 = { 3 },
4770 .errstr = "R2 min value is negative",
4772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4775 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4779 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4780 BPF_LD_MAP_FD(BPF_REG_1, 0),
4781 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4784 BPF_MOV64_IMM(BPF_REG_3,
4785 offsetof(struct test_val, foo)),
4786 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4787 BPF_MOV64_IMM(BPF_REG_2, -1),
4788 BPF_MOV64_IMM(BPF_REG_3, 0),
4789 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4792 .fixup_map2 = { 3 },
4793 .errstr = "R2 min value is negative",
4795 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4798 "helper access to adjusted map (via variable): full range",
4800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4802 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4803 BPF_LD_MAP_FD(BPF_REG_1, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4807 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4808 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4809 offsetof(struct test_val, foo), 4),
4810 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4811 BPF_MOV64_IMM(BPF_REG_2,
4812 sizeof(struct test_val) -
4813 offsetof(struct test_val, foo)),
4814 BPF_MOV64_IMM(BPF_REG_3, 0),
4815 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4818 .fixup_map2 = { 3 },
4820 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4823 "helper access to adjusted map (via variable): partial range",
4825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4827 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4828 BPF_LD_MAP_FD(BPF_REG_1, 0),
4829 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4832 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4833 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4834 offsetof(struct test_val, foo), 4),
4835 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4836 BPF_MOV64_IMM(BPF_REG_2, 8),
4837 BPF_MOV64_IMM(BPF_REG_3, 0),
4838 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4841 .fixup_map2 = { 3 },
4843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4846 "helper access to adjusted map (via variable): empty range",
4848 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4850 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4851 BPF_LD_MAP_FD(BPF_REG_1, 0),
4852 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4853 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4854 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4856 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4857 offsetof(struct test_val, foo), 4),
4858 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4859 BPF_MOV64_IMM(BPF_REG_2, 0),
4860 BPF_MOV64_IMM(BPF_REG_3, 0),
4861 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4864 .fixup_map2 = { 3 },
4865 .errstr = "R1 min value is outside of the array range",
4867 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4870 "helper access to adjusted map (via variable): no max check",
4872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4874 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4875 BPF_LD_MAP_FD(BPF_REG_1, 0),
4876 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4877 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4879 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4880 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4881 BPF_MOV64_IMM(BPF_REG_2, 1),
4882 BPF_MOV64_IMM(BPF_REG_3, 0),
4883 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4886 .fixup_map2 = { 3 },
4887 .errstr = "R1 unbounded memory access",
4889 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4892 "helper access to adjusted map (via variable): wrong max check",
4894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4896 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4897 BPF_LD_MAP_FD(BPF_REG_1, 0),
4898 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4901 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4902 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4903 offsetof(struct test_val, foo), 4),
4904 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4905 BPF_MOV64_IMM(BPF_REG_2,
4906 sizeof(struct test_val) -
4907 offsetof(struct test_val, foo) + 1),
4908 BPF_MOV64_IMM(BPF_REG_3, 0),
4909 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4912 .fixup_map2 = { 3 },
4913 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4915 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4918 "helper access to map: bounds check using <, good access",
4920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4928 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4929 BPF_MOV64_IMM(BPF_REG_0, 0),
4931 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4932 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4933 BPF_MOV64_IMM(BPF_REG_0, 0),
4936 .fixup_map2 = { 3 },
4938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4941 "helper access to map: bounds check using <, bad access",
4943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4946 BPF_LD_MAP_FD(BPF_REG_1, 0),
4947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4950 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4951 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4952 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4953 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4954 BPF_MOV64_IMM(BPF_REG_0, 0),
4956 BPF_MOV64_IMM(BPF_REG_0, 0),
4959 .fixup_map2 = { 3 },
4961 .errstr = "R1 unbounded memory access",
4962 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4965 "helper access to map: bounds check using <=, good access",
4967 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4969 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4970 BPF_LD_MAP_FD(BPF_REG_1, 0),
4971 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4972 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4974 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4975 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4976 BPF_MOV64_IMM(BPF_REG_0, 0),
4978 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4979 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4980 BPF_MOV64_IMM(BPF_REG_0, 0),
4983 .fixup_map2 = { 3 },
4985 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4988 "helper access to map: bounds check using <=, bad access",
4990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4992 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4993 BPF_LD_MAP_FD(BPF_REG_1, 0),
4994 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4996 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4997 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4998 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4999 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5000 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5001 BPF_MOV64_IMM(BPF_REG_0, 0),
5003 BPF_MOV64_IMM(BPF_REG_0, 0),
5006 .fixup_map2 = { 3 },
5008 .errstr = "R1 unbounded memory access",
5009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5012 "helper access to map: bounds check using s<, good access",
5014 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5016 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5017 BPF_LD_MAP_FD(BPF_REG_1, 0),
5018 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5019 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5020 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5021 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5022 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5023 BPF_MOV64_IMM(BPF_REG_0, 0),
5025 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5026 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5027 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5028 BPF_MOV64_IMM(BPF_REG_0, 0),
5031 .fixup_map2 = { 3 },
5033 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5036 "helper access to map: bounds check using s<, good access 2",
5038 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5040 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5041 BPF_LD_MAP_FD(BPF_REG_1, 0),
5042 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5043 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5044 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5045 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5046 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5047 BPF_MOV64_IMM(BPF_REG_0, 0),
5049 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5050 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5051 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5052 BPF_MOV64_IMM(BPF_REG_0, 0),
5055 .fixup_map2 = { 3 },
5057 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5060 "helper access to map: bounds check using s<, bad access",
5062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5064 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5065 BPF_LD_MAP_FD(BPF_REG_1, 0),
5066 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5068 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5069 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5070 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5071 BPF_MOV64_IMM(BPF_REG_0, 0),
5073 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5074 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5075 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5076 BPF_MOV64_IMM(BPF_REG_0, 0),
5079 .fixup_map2 = { 3 },
5081 .errstr = "R1 min value is negative",
5082 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5085 "helper access to map: bounds check using s<=, good access",
5087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5089 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5090 BPF_LD_MAP_FD(BPF_REG_1, 0),
5091 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5092 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5093 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5094 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5095 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5096 BPF_MOV64_IMM(BPF_REG_0, 0),
5098 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5099 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5100 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5101 BPF_MOV64_IMM(BPF_REG_0, 0),
5104 .fixup_map2 = { 3 },
5106 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5109 "helper access to map: bounds check using s<=, good access 2",
5111 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5113 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5114 BPF_LD_MAP_FD(BPF_REG_1, 0),
5115 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5117 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5118 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5119 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5120 BPF_MOV64_IMM(BPF_REG_0, 0),
5122 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5123 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5124 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5125 BPF_MOV64_IMM(BPF_REG_0, 0),
5128 .fixup_map2 = { 3 },
5130 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5133 "helper access to map: bounds check using s<=, bad access",
5135 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5137 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5138 BPF_LD_MAP_FD(BPF_REG_1, 0),
5139 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5141 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5142 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5143 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5144 BPF_MOV64_IMM(BPF_REG_0, 0),
5146 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5147 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5148 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5149 BPF_MOV64_IMM(BPF_REG_0, 0),
5152 .fixup_map2 = { 3 },
5154 .errstr = "R1 min value is negative",
5155 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5158 "map element value is preserved across register spilling",
5160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5162 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5163 BPF_LD_MAP_FD(BPF_REG_1, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5166 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5167 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5169 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5170 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5171 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5174 .fixup_map2 = { 3 },
5175 .errstr_unpriv = "R0 leaks addr",
5177 .result_unpriv = REJECT,
5180 "map element value or null is marked on register spilling",
5182 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5184 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5185 BPF_LD_MAP_FD(BPF_REG_1, 0),
5186 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5187 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5189 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5191 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5192 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5195 .fixup_map2 = { 3 },
5196 .errstr_unpriv = "R0 leaks addr",
5198 .result_unpriv = REJECT,
5201 "map element value store of cleared call register",
5203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5205 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5206 BPF_LD_MAP_FD(BPF_REG_1, 0),
5207 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5209 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5212 .fixup_map2 = { 3 },
5213 .errstr_unpriv = "R1 !read_ok",
5214 .errstr = "R1 !read_ok",
5216 .result_unpriv = REJECT,
5219 "map element value with unaligned store",
5221 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5223 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5224 BPF_LD_MAP_FD(BPF_REG_1, 0),
5225 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5228 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5229 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5230 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5231 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5232 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5233 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5234 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5236 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5237 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5238 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5239 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5241 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5242 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5243 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5246 .fixup_map2 = { 3 },
5247 .errstr_unpriv = "R0 leaks addr",
5249 .result_unpriv = REJECT,
5250 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5253 "map element value with unaligned load",
5255 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5257 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5258 BPF_LD_MAP_FD(BPF_REG_1, 0),
5259 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5261 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5262 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5264 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5265 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5266 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5267 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5268 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5270 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5271 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5274 .fixup_map2 = { 3 },
5275 .errstr_unpriv = "R0 leaks addr",
5277 .result_unpriv = REJECT,
5278 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5281 "map element value illegal alu op, 1",
5283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5285 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5286 BPF_LD_MAP_FD(BPF_REG_1, 0),
5287 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5289 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5290 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5293 .fixup_map2 = { 3 },
5294 .errstr_unpriv = "R0 bitwise operator &= on pointer",
5295 .errstr = "invalid mem access 'inv'",
5297 .result_unpriv = REJECT,
5300 "map element value illegal alu op, 2",
5302 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5304 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5305 BPF_LD_MAP_FD(BPF_REG_1, 0),
5306 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5307 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5308 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5309 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5312 .fixup_map2 = { 3 },
5313 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5314 .errstr = "invalid mem access 'inv'",
5316 .result_unpriv = REJECT,
5319 "map element value illegal alu op, 3",
5321 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5323 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5324 BPF_LD_MAP_FD(BPF_REG_1, 0),
5325 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5326 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5327 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5328 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5331 .fixup_map2 = { 3 },
5332 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
5333 .errstr = "invalid mem access 'inv'",
5335 .result_unpriv = REJECT,
5338 "map element value illegal alu op, 4",
5340 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5342 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5343 BPF_LD_MAP_FD(BPF_REG_1, 0),
5344 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5346 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5347 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5350 .fixup_map2 = { 3 },
5351 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5352 .errstr = "invalid mem access 'inv'",
5354 .result_unpriv = REJECT,
5357 "map element value illegal alu op, 5",
5359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5361 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5362 BPF_LD_MAP_FD(BPF_REG_1, 0),
5363 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5365 BPF_MOV64_IMM(BPF_REG_3, 4096),
5366 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5368 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5369 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5371 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5374 .fixup_map2 = { 3 },
5375 .errstr = "R0 invalid mem access 'inv'",
5379 "map element value is preserved across register spilling",
5381 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5383 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5384 BPF_LD_MAP_FD(BPF_REG_1, 0),
5385 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5386 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5388 offsetof(struct test_val, foo)),
5389 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5390 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5392 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5393 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5394 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5397 .fixup_map2 = { 3 },
5398 .errstr_unpriv = "R0 leaks addr",
5400 .result_unpriv = REJECT,
5401 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5404 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5406 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5408 BPF_MOV64_IMM(BPF_REG_0, 0),
5409 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5410 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5411 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5412 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5415 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5416 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5417 BPF_MOV64_IMM(BPF_REG_2, 16),
5418 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5419 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5420 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5421 BPF_MOV64_IMM(BPF_REG_4, 0),
5422 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5423 BPF_MOV64_IMM(BPF_REG_3, 0),
5424 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5425 BPF_MOV64_IMM(BPF_REG_0, 0),
5429 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5432 "helper access to variable memory: stack, bitwise AND, zero included",
5434 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5436 BPF_MOV64_IMM(BPF_REG_2, 16),
5437 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5438 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5439 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5440 BPF_MOV64_IMM(BPF_REG_3, 0),
5441 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5444 .errstr = "invalid stack type R1 off=-64 access_size=0",
5446 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5449 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5451 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5453 BPF_MOV64_IMM(BPF_REG_2, 16),
5454 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5455 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5456 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5457 BPF_MOV64_IMM(BPF_REG_4, 0),
5458 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5459 BPF_MOV64_IMM(BPF_REG_3, 0),
5460 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5461 BPF_MOV64_IMM(BPF_REG_0, 0),
5464 .errstr = "invalid stack type R1 off=-64 access_size=65",
5466 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5469 "helper access to variable memory: stack, JMP, correct bounds",
5471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5473 BPF_MOV64_IMM(BPF_REG_0, 0),
5474 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5475 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5476 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5477 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5478 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5479 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5480 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5481 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5482 BPF_MOV64_IMM(BPF_REG_2, 16),
5483 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5484 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5485 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5486 BPF_MOV64_IMM(BPF_REG_4, 0),
5487 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5488 BPF_MOV64_IMM(BPF_REG_3, 0),
5489 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5490 BPF_MOV64_IMM(BPF_REG_0, 0),
5494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5497 "helper access to variable memory: stack, JMP (signed), correct bounds",
5499 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5501 BPF_MOV64_IMM(BPF_REG_0, 0),
5502 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5503 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5504 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5505 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5506 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5507 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5508 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5509 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5510 BPF_MOV64_IMM(BPF_REG_2, 16),
5511 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5512 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5513 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5514 BPF_MOV64_IMM(BPF_REG_4, 0),
5515 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5516 BPF_MOV64_IMM(BPF_REG_3, 0),
5517 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5518 BPF_MOV64_IMM(BPF_REG_0, 0),
5522 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5525 "helper access to variable memory: stack, JMP, bounds + offset",
5527 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5529 BPF_MOV64_IMM(BPF_REG_2, 16),
5530 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5531 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5532 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5533 BPF_MOV64_IMM(BPF_REG_4, 0),
5534 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5536 BPF_MOV64_IMM(BPF_REG_3, 0),
5537 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5538 BPF_MOV64_IMM(BPF_REG_0, 0),
5541 .errstr = "invalid stack type R1 off=-64 access_size=65",
5543 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5546 "helper access to variable memory: stack, JMP, wrong max",
5548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5550 BPF_MOV64_IMM(BPF_REG_2, 16),
5551 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5552 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5553 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5554 BPF_MOV64_IMM(BPF_REG_4, 0),
5555 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5556 BPF_MOV64_IMM(BPF_REG_3, 0),
5557 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5558 BPF_MOV64_IMM(BPF_REG_0, 0),
5561 .errstr = "invalid stack type R1 off=-64 access_size=65",
5563 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5566 "helper access to variable memory: stack, JMP, no max check",
5568 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5570 BPF_MOV64_IMM(BPF_REG_2, 16),
5571 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5572 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5573 BPF_MOV64_IMM(BPF_REG_4, 0),
5574 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5575 BPF_MOV64_IMM(BPF_REG_3, 0),
5576 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5577 BPF_MOV64_IMM(BPF_REG_0, 0),
5580 /* because max wasn't checked, signed min is negative */
5581 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5583 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5586 "helper access to variable memory: stack, JMP, no min check",
5588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5590 BPF_MOV64_IMM(BPF_REG_2, 16),
5591 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5592 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5593 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5594 BPF_MOV64_IMM(BPF_REG_3, 0),
5595 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5596 BPF_MOV64_IMM(BPF_REG_0, 0),
5599 .errstr = "invalid stack type R1 off=-64 access_size=0",
5601 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5604 "helper access to variable memory: stack, JMP (signed), no min check",
5606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5608 BPF_MOV64_IMM(BPF_REG_2, 16),
5609 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5610 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5611 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5612 BPF_MOV64_IMM(BPF_REG_3, 0),
5613 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5614 BPF_MOV64_IMM(BPF_REG_0, 0),
5617 .errstr = "R2 min value is negative",
5619 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5622 "helper access to variable memory: map, JMP, correct bounds",
5624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5626 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5627 BPF_LD_MAP_FD(BPF_REG_1, 0),
5628 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5631 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5632 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5633 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5634 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5635 sizeof(struct test_val), 4),
5636 BPF_MOV64_IMM(BPF_REG_4, 0),
5637 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5638 BPF_MOV64_IMM(BPF_REG_3, 0),
5639 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5640 BPF_MOV64_IMM(BPF_REG_0, 0),
5643 .fixup_map2 = { 3 },
5645 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5648 "helper access to variable memory: map, JMP, wrong max",
5650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5652 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5653 BPF_LD_MAP_FD(BPF_REG_1, 0),
5654 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5656 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5657 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5658 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5659 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5660 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5661 sizeof(struct test_val) + 1, 4),
5662 BPF_MOV64_IMM(BPF_REG_4, 0),
5663 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5664 BPF_MOV64_IMM(BPF_REG_3, 0),
5665 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5666 BPF_MOV64_IMM(BPF_REG_0, 0),
5669 .fixup_map2 = { 3 },
5670 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5672 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5675 "helper access to variable memory: map adjusted, JMP, correct bounds",
5677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5679 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5680 BPF_LD_MAP_FD(BPF_REG_1, 0),
5681 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5685 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5686 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5687 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5688 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5689 sizeof(struct test_val) - 20, 4),
5690 BPF_MOV64_IMM(BPF_REG_4, 0),
5691 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5692 BPF_MOV64_IMM(BPF_REG_3, 0),
5693 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5694 BPF_MOV64_IMM(BPF_REG_0, 0),
5697 .fixup_map2 = { 3 },
5699 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5702 "helper access to variable memory: map adjusted, JMP, wrong max",
5704 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5706 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5707 BPF_LD_MAP_FD(BPF_REG_1, 0),
5708 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5709 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5710 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5712 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5713 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5714 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5715 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5716 sizeof(struct test_val) - 19, 4),
5717 BPF_MOV64_IMM(BPF_REG_4, 0),
5718 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5719 BPF_MOV64_IMM(BPF_REG_3, 0),
5720 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5721 BPF_MOV64_IMM(BPF_REG_0, 0),
5724 .fixup_map2 = { 3 },
5725 .errstr = "R1 min value is outside of the array range",
5727 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5730 "helper access to variable memory: size = 0 allowed on NULL",
5732 BPF_MOV64_IMM(BPF_REG_1, 0),
5733 BPF_MOV64_IMM(BPF_REG_2, 0),
5734 BPF_MOV64_IMM(BPF_REG_3, 0),
5735 BPF_MOV64_IMM(BPF_REG_4, 0),
5736 BPF_MOV64_IMM(BPF_REG_5, 0),
5737 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5744 "helper access to variable memory: size > 0 not allowed on NULL",
5746 BPF_MOV64_IMM(BPF_REG_1, 0),
5747 BPF_MOV64_IMM(BPF_REG_2, 0),
5748 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5749 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5750 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5751 BPF_MOV64_IMM(BPF_REG_3, 0),
5752 BPF_MOV64_IMM(BPF_REG_4, 0),
5753 BPF_MOV64_IMM(BPF_REG_5, 0),
5754 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5757 .errstr = "R1 type=inv expected=fp",
5759 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5762 "helper access to variable memory: size = 0 not allowed on != NULL",
5764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5766 BPF_MOV64_IMM(BPF_REG_2, 0),
5767 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5768 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5769 BPF_MOV64_IMM(BPF_REG_3, 0),
5770 BPF_MOV64_IMM(BPF_REG_4, 0),
5771 BPF_MOV64_IMM(BPF_REG_5, 0),
5772 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5775 .errstr = "invalid stack type R1 off=-8 access_size=0",
5777 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5780 "helper access to variable memory: 8 bytes leak",
5782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5784 BPF_MOV64_IMM(BPF_REG_0, 0),
5785 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5786 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5787 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5788 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5790 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5791 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5792 BPF_MOV64_IMM(BPF_REG_2, 0),
5793 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5794 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5795 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5796 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5797 BPF_MOV64_IMM(BPF_REG_3, 0),
5798 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5799 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5802 .errstr = "invalid indirect read from stack off -64+32 size 64",
5804 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5807 "helper access to variable memory: 8 bytes no leak (init memory)",
5809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5810 BPF_MOV64_IMM(BPF_REG_0, 0),
5811 BPF_MOV64_IMM(BPF_REG_0, 0),
5812 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5813 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5815 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5816 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5817 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5818 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5819 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5821 BPF_MOV64_IMM(BPF_REG_2, 0),
5822 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5824 BPF_MOV64_IMM(BPF_REG_3, 0),
5825 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5826 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5833 "invalid and of negative number",
5835 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5838 BPF_LD_MAP_FD(BPF_REG_1, 0),
5839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5840 BPF_FUNC_map_lookup_elem),
5841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5842 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5843 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5844 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5845 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5846 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5847 offsetof(struct test_val, foo)),
5850 .fixup_map2 = { 3 },
5851 .errstr = "R0 max value is outside of the array range",
5853 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5856 "invalid range check",
5858 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5861 BPF_LD_MAP_FD(BPF_REG_1, 0),
5862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5863 BPF_FUNC_map_lookup_elem),
5864 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5865 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5866 BPF_MOV64_IMM(BPF_REG_9, 1),
5867 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5868 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5869 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5870 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5871 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5872 BPF_MOV32_IMM(BPF_REG_3, 1),
5873 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5874 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5875 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5876 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5877 BPF_MOV64_REG(BPF_REG_0, 0),
5880 .fixup_map2 = { 3 },
5881 .errstr = "R0 max value is outside of the array range",
5883 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5886 "map in map access",
5888 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5889 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5891 BPF_LD_MAP_FD(BPF_REG_1, 0),
5892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5893 BPF_FUNC_map_lookup_elem),
5894 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5895 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5898 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5900 BPF_FUNC_map_lookup_elem),
5901 BPF_MOV64_IMM(BPF_REG_0, 0),
5904 .fixup_map_in_map = { 3 },
5908 "invalid inner map pointer",
5910 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5913 BPF_LD_MAP_FD(BPF_REG_1, 0),
5914 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5915 BPF_FUNC_map_lookup_elem),
5916 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5917 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5920 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5923 BPF_FUNC_map_lookup_elem),
5924 BPF_MOV64_IMM(BPF_REG_0, 0),
5927 .fixup_map_in_map = { 3 },
5928 .errstr = "R1 type=inv expected=map_ptr",
5929 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5933 "forgot null checking on the inner map pointer",
5935 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5938 BPF_LD_MAP_FD(BPF_REG_1, 0),
5939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5940 BPF_FUNC_map_lookup_elem),
5941 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5946 BPF_FUNC_map_lookup_elem),
5947 BPF_MOV64_IMM(BPF_REG_0, 0),
5950 .fixup_map_in_map = { 3 },
5951 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5955 "ld_abs: check calling conv, r1",
5957 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5958 BPF_MOV64_IMM(BPF_REG_1, 0),
5959 BPF_LD_ABS(BPF_W, -0x200000),
5960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5963 .errstr = "R1 !read_ok",
5967 "ld_abs: check calling conv, r2",
5969 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5970 BPF_MOV64_IMM(BPF_REG_2, 0),
5971 BPF_LD_ABS(BPF_W, -0x200000),
5972 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5975 .errstr = "R2 !read_ok",
5979 "ld_abs: check calling conv, r3",
5981 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5982 BPF_MOV64_IMM(BPF_REG_3, 0),
5983 BPF_LD_ABS(BPF_W, -0x200000),
5984 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5987 .errstr = "R3 !read_ok",
5991 "ld_abs: check calling conv, r4",
5993 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5994 BPF_MOV64_IMM(BPF_REG_4, 0),
5995 BPF_LD_ABS(BPF_W, -0x200000),
5996 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5999 .errstr = "R4 !read_ok",
6003 "ld_abs: check calling conv, r5",
6005 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6006 BPF_MOV64_IMM(BPF_REG_5, 0),
6007 BPF_LD_ABS(BPF_W, -0x200000),
6008 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6011 .errstr = "R5 !read_ok",
6015 "ld_abs: check calling conv, r7",
6017 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6018 BPF_MOV64_IMM(BPF_REG_7, 0),
6019 BPF_LD_ABS(BPF_W, -0x200000),
6020 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6026 "ld_ind: check calling conv, r1",
6028 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6029 BPF_MOV64_IMM(BPF_REG_1, 1),
6030 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6031 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6034 .errstr = "R1 !read_ok",
6038 "ld_ind: check calling conv, r2",
6040 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6041 BPF_MOV64_IMM(BPF_REG_2, 1),
6042 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6043 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6046 .errstr = "R2 !read_ok",
6050 "ld_ind: check calling conv, r3",
6052 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6053 BPF_MOV64_IMM(BPF_REG_3, 1),
6054 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6055 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6058 .errstr = "R3 !read_ok",
6062 "ld_ind: check calling conv, r4",
6064 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6065 BPF_MOV64_IMM(BPF_REG_4, 1),
6066 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6067 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6070 .errstr = "R4 !read_ok",
6074 "ld_ind: check calling conv, r5",
6076 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6077 BPF_MOV64_IMM(BPF_REG_5, 1),
6078 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6079 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6082 .errstr = "R5 !read_ok",
6086 "ld_ind: check calling conv, r7",
6088 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6089 BPF_MOV64_IMM(BPF_REG_7, 1),
6090 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6091 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6097 "check bpf_perf_event_data->sample_period byte load permitted",
6099 BPF_MOV64_IMM(BPF_REG_0, 0),
6100 #if __BYTE_ORDER == __LITTLE_ENDIAN
6101 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6102 offsetof(struct bpf_perf_event_data, sample_period)),
6104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6105 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6110 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6113 "check bpf_perf_event_data->sample_period half load permitted",
6115 BPF_MOV64_IMM(BPF_REG_0, 0),
6116 #if __BYTE_ORDER == __LITTLE_ENDIAN
6117 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6118 offsetof(struct bpf_perf_event_data, sample_period)),
6120 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6121 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6126 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6129 "check bpf_perf_event_data->sample_period word load permitted",
6131 BPF_MOV64_IMM(BPF_REG_0, 0),
6132 #if __BYTE_ORDER == __LITTLE_ENDIAN
6133 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6134 offsetof(struct bpf_perf_event_data, sample_period)),
6136 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6137 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6142 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6145 "check bpf_perf_event_data->sample_period dword load permitted",
6147 BPF_MOV64_IMM(BPF_REG_0, 0),
6148 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6149 offsetof(struct bpf_perf_event_data, sample_period)),
6153 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6156 "check skb->data half load not permitted",
6158 BPF_MOV64_IMM(BPF_REG_0, 0),
6159 #if __BYTE_ORDER == __LITTLE_ENDIAN
6160 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6161 offsetof(struct __sk_buff, data)),
6163 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6164 offsetof(struct __sk_buff, data) + 2),
6169 .errstr = "invalid bpf_context access",
6172 "check skb->tc_classid half load not permitted for lwt prog",
6174 BPF_MOV64_IMM(BPF_REG_0, 0),
6175 #if __BYTE_ORDER == __LITTLE_ENDIAN
6176 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6177 offsetof(struct __sk_buff, tc_classid)),
6179 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6180 offsetof(struct __sk_buff, tc_classid) + 2),
6185 .errstr = "invalid bpf_context access",
6186 .prog_type = BPF_PROG_TYPE_LWT_IN,
6189 "bounds checks mixing signed and unsigned, positive bounds",
6191 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6192 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6194 BPF_LD_MAP_FD(BPF_REG_1, 0),
6195 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6196 BPF_FUNC_map_lookup_elem),
6197 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6198 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6199 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6200 BPF_MOV64_IMM(BPF_REG_2, 2),
6201 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6202 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6203 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6204 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6205 BPF_MOV64_IMM(BPF_REG_0, 0),
6208 .fixup_map1 = { 3 },
6209 .errstr = "unbounded min value",
6213 "bounds checks mixing signed and unsigned",
6215 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6218 BPF_LD_MAP_FD(BPF_REG_1, 0),
6219 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6220 BPF_FUNC_map_lookup_elem),
6221 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6222 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6223 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6224 BPF_MOV64_IMM(BPF_REG_2, -1),
6225 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6226 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6227 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6228 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6229 BPF_MOV64_IMM(BPF_REG_0, 0),
6232 .fixup_map1 = { 3 },
6233 .errstr = "unbounded min value",
6237 "bounds checks mixing signed and unsigned, variant 2",
6239 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6242 BPF_LD_MAP_FD(BPF_REG_1, 0),
6243 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6244 BPF_FUNC_map_lookup_elem),
6245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6246 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6247 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6248 BPF_MOV64_IMM(BPF_REG_2, -1),
6249 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6250 BPF_MOV64_IMM(BPF_REG_8, 0),
6251 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6252 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6253 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6254 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6255 BPF_MOV64_IMM(BPF_REG_0, 0),
6258 .fixup_map1 = { 3 },
6259 .errstr = "unbounded min value",
6263 "bounds checks mixing signed and unsigned, variant 3",
6265 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6268 BPF_LD_MAP_FD(BPF_REG_1, 0),
6269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6270 BPF_FUNC_map_lookup_elem),
6271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6272 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6273 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6274 BPF_MOV64_IMM(BPF_REG_2, -1),
6275 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6276 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6277 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6278 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6279 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6280 BPF_MOV64_IMM(BPF_REG_0, 0),
6283 .fixup_map1 = { 3 },
6284 .errstr = "unbounded min value",
6288 "bounds checks mixing signed and unsigned, variant 4",
6290 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6291 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6293 BPF_LD_MAP_FD(BPF_REG_1, 0),
6294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6295 BPF_FUNC_map_lookup_elem),
6296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6297 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6298 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6299 BPF_MOV64_IMM(BPF_REG_2, 1),
6300 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6301 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6302 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6303 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6304 BPF_MOV64_IMM(BPF_REG_0, 0),
6307 .fixup_map1 = { 3 },
6311 "bounds checks mixing signed and unsigned, variant 5",
6313 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6314 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6316 BPF_LD_MAP_FD(BPF_REG_1, 0),
6317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6318 BPF_FUNC_map_lookup_elem),
6319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6320 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6321 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6322 BPF_MOV64_IMM(BPF_REG_2, -1),
6323 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6324 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6326 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6327 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6328 BPF_MOV64_IMM(BPF_REG_0, 0),
6331 .fixup_map1 = { 3 },
6332 .errstr = "unbounded min value",
6336 "bounds checks mixing signed and unsigned, variant 6",
6338 BPF_MOV64_IMM(BPF_REG_2, 0),
6339 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6341 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6342 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6343 BPF_MOV64_IMM(BPF_REG_6, -1),
6344 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6345 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6347 BPF_MOV64_IMM(BPF_REG_5, 0),
6348 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6350 BPF_FUNC_skb_load_bytes),
6351 BPF_MOV64_IMM(BPF_REG_0, 0),
6354 .errstr = "R4 min value is negative, either use unsigned",
6358 "bounds checks mixing signed and unsigned, variant 7",
6360 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6363 BPF_LD_MAP_FD(BPF_REG_1, 0),
6364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6365 BPF_FUNC_map_lookup_elem),
6366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6367 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6368 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6369 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6370 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6371 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6372 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6373 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6374 BPF_MOV64_IMM(BPF_REG_0, 0),
6377 .fixup_map1 = { 3 },
6381 "bounds checks mixing signed and unsigned, variant 8",
6383 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6384 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6386 BPF_LD_MAP_FD(BPF_REG_1, 0),
6387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6388 BPF_FUNC_map_lookup_elem),
6389 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6390 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6391 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6392 BPF_MOV64_IMM(BPF_REG_2, -1),
6393 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6394 BPF_MOV64_IMM(BPF_REG_0, 0),
6396 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6397 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6398 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6399 BPF_MOV64_IMM(BPF_REG_0, 0),
6402 .fixup_map1 = { 3 },
6403 .errstr = "unbounded min value",
6407 "bounds checks mixing signed and unsigned, variant 9",
6409 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6410 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6412 BPF_LD_MAP_FD(BPF_REG_1, 0),
6413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6414 BPF_FUNC_map_lookup_elem),
6415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6416 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6417 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6418 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6419 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6420 BPF_MOV64_IMM(BPF_REG_0, 0),
6422 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6423 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6424 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6425 BPF_MOV64_IMM(BPF_REG_0, 0),
6428 .fixup_map1 = { 3 },
6432 "bounds checks mixing signed and unsigned, variant 10",
6434 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6437 BPF_LD_MAP_FD(BPF_REG_1, 0),
6438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6439 BPF_FUNC_map_lookup_elem),
6440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6441 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6442 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6443 BPF_MOV64_IMM(BPF_REG_2, 0),
6444 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6445 BPF_MOV64_IMM(BPF_REG_0, 0),
6447 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6448 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6449 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6450 BPF_MOV64_IMM(BPF_REG_0, 0),
6453 .fixup_map1 = { 3 },
6454 .errstr = "unbounded min value",
6458 "bounds checks mixing signed and unsigned, variant 11",
6460 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6463 BPF_LD_MAP_FD(BPF_REG_1, 0),
6464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6465 BPF_FUNC_map_lookup_elem),
6466 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6467 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6468 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6469 BPF_MOV64_IMM(BPF_REG_2, -1),
6470 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6472 BPF_MOV64_IMM(BPF_REG_0, 0),
6474 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6475 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6476 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6477 BPF_MOV64_IMM(BPF_REG_0, 0),
6480 .fixup_map1 = { 3 },
6481 .errstr = "unbounded min value",
6485 "bounds checks mixing signed and unsigned, variant 12",
6487 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6490 BPF_LD_MAP_FD(BPF_REG_1, 0),
6491 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6492 BPF_FUNC_map_lookup_elem),
6493 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6494 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6495 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6496 BPF_MOV64_IMM(BPF_REG_2, -6),
6497 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6498 BPF_MOV64_IMM(BPF_REG_0, 0),
6500 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6501 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6502 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6503 BPF_MOV64_IMM(BPF_REG_0, 0),
6506 .fixup_map1 = { 3 },
6507 .errstr = "unbounded min value",
6511 "bounds checks mixing signed and unsigned, variant 13",
6513 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6516 BPF_LD_MAP_FD(BPF_REG_1, 0),
6517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6518 BPF_FUNC_map_lookup_elem),
6519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6520 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6521 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6522 BPF_MOV64_IMM(BPF_REG_2, 2),
6523 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6524 BPF_MOV64_IMM(BPF_REG_7, 1),
6525 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6526 BPF_MOV64_IMM(BPF_REG_0, 0),
6528 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6529 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6530 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6531 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6532 BPF_MOV64_IMM(BPF_REG_0, 0),
6535 .fixup_map1 = { 3 },
6536 .errstr = "unbounded min value",
6540 "bounds checks mixing signed and unsigned, variant 14",
6542 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6543 offsetof(struct __sk_buff, mark)),
6544 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6547 BPF_LD_MAP_FD(BPF_REG_1, 0),
6548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6549 BPF_FUNC_map_lookup_elem),
6550 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6551 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6552 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6553 BPF_MOV64_IMM(BPF_REG_2, -1),
6554 BPF_MOV64_IMM(BPF_REG_8, 2),
6555 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6556 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6557 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6558 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6559 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6560 BPF_MOV64_IMM(BPF_REG_0, 0),
6562 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6563 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6565 .fixup_map1 = { 4 },
6566 .errstr = "R0 invalid mem access 'inv'",
6570 "bounds checks mixing signed and unsigned, variant 15",
6572 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6573 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6575 BPF_LD_MAP_FD(BPF_REG_1, 0),
6576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6577 BPF_FUNC_map_lookup_elem),
6578 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6579 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6580 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6581 BPF_MOV64_IMM(BPF_REG_2, -6),
6582 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6583 BPF_MOV64_IMM(BPF_REG_0, 0),
6585 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6586 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6587 BPF_MOV64_IMM(BPF_REG_0, 0),
6589 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6590 BPF_MOV64_IMM(BPF_REG_0, 0),
6593 .fixup_map1 = { 3 },
6594 .errstr = "unbounded min value",
6596 .result_unpriv = REJECT,
6599 "subtraction bounds (map value) variant 1",
6601 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6604 BPF_LD_MAP_FD(BPF_REG_1, 0),
6605 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6606 BPF_FUNC_map_lookup_elem),
6607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6608 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6609 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6610 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6611 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6612 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6613 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6614 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6615 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6617 BPF_MOV64_IMM(BPF_REG_0, 0),
6620 .fixup_map1 = { 3 },
6621 .errstr = "R0 max value is outside of the array range",
6625 "subtraction bounds (map value) variant 2",
6627 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6628 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6630 BPF_LD_MAP_FD(BPF_REG_1, 0),
6631 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6632 BPF_FUNC_map_lookup_elem),
6633 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6634 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6635 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6636 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6637 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6638 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6639 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6640 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6642 BPF_MOV64_IMM(BPF_REG_0, 0),
6645 .fixup_map1 = { 3 },
6646 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6650 "bounds check based on zero-extended MOV",
6652 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6653 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6655 BPF_LD_MAP_FD(BPF_REG_1, 0),
6656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6657 BPF_FUNC_map_lookup_elem),
6658 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6659 /* r2 = 0x0000'0000'ffff'ffff */
6660 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6662 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6664 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6665 /* access at offset 0 */
6666 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6668 BPF_MOV64_IMM(BPF_REG_0, 0),
6671 .fixup_map1 = { 3 },
6675 "bounds check based on sign-extended MOV. test1",
6677 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6680 BPF_LD_MAP_FD(BPF_REG_1, 0),
6681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6682 BPF_FUNC_map_lookup_elem),
6683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6684 /* r2 = 0xffff'ffff'ffff'ffff */
6685 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6686 /* r2 = 0xffff'ffff */
6687 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6688 /* r0 = <oob pointer> */
6689 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6690 /* access to OOB pointer */
6691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6693 BPF_MOV64_IMM(BPF_REG_0, 0),
6696 .fixup_map1 = { 3 },
6697 .errstr = "map_value pointer and 4294967295",
6701 "bounds check based on sign-extended MOV. test2",
6703 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6704 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6706 BPF_LD_MAP_FD(BPF_REG_1, 0),
6707 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6708 BPF_FUNC_map_lookup_elem),
6709 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6710 /* r2 = 0xffff'ffff'ffff'ffff */
6711 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6712 /* r2 = 0xfff'ffff */
6713 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6714 /* r0 = <oob pointer> */
6715 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6716 /* access to OOB pointer */
6717 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6719 BPF_MOV64_IMM(BPF_REG_0, 0),
6722 .fixup_map1 = { 3 },
6723 .errstr = "R0 min value is outside of the array range",
6727 "bounds check based on reg_off + var_off + insn_off. test1",
6729 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6730 offsetof(struct __sk_buff, mark)),
6731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6734 BPF_LD_MAP_FD(BPF_REG_1, 0),
6735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6736 BPF_FUNC_map_lookup_elem),
6737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6738 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6740 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6742 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6743 BPF_MOV64_IMM(BPF_REG_0, 0),
6746 .fixup_map1 = { 4 },
6747 .errstr = "value_size=8 off=1073741825",
6749 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6752 "bounds check based on reg_off + var_off + insn_off. test2",
6754 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6755 offsetof(struct __sk_buff, mark)),
6756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 BPF_FUNC_map_lookup_elem),
6762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6763 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6765 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6767 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6768 BPF_MOV64_IMM(BPF_REG_0, 0),
6771 .fixup_map1 = { 4 },
6772 .errstr = "value 1073741823",
6774 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6777 "bounds check after truncation of non-boundary-crossing range",
6779 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6782 BPF_LD_MAP_FD(BPF_REG_1, 0),
6783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6784 BPF_FUNC_map_lookup_elem),
6785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6786 /* r1 = [0x00, 0xff] */
6787 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6788 BPF_MOV64_IMM(BPF_REG_2, 1),
6789 /* r2 = 0x10'0000'0000 */
6790 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6791 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6792 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6793 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6795 /* r1 = [0x00, 0xff] */
6796 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6798 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6800 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6801 /* access at offset 0 */
6802 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6804 BPF_MOV64_IMM(BPF_REG_0, 0),
6807 .fixup_map1 = { 3 },
6811 "bounds check after truncation of boundary-crossing range (1)",
6813 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6816 BPF_LD_MAP_FD(BPF_REG_1, 0),
6817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6818 BPF_FUNC_map_lookup_elem),
6819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6820 /* r1 = [0x00, 0xff] */
6821 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6823 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6825 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6826 * [0x0000'0000, 0x0000'007f]
6828 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6829 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6830 /* r1 = [0x00, 0xff] or
6831 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6833 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6835 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6837 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6838 /* no-op or OOB pointer computation */
6839 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6840 /* potentially OOB access */
6841 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6843 BPF_MOV64_IMM(BPF_REG_0, 0),
6846 .fixup_map1 = { 3 },
6847 /* not actually fully unbounded, but the bound is very high */
6848 .errstr = "R0 unbounded memory access",
6852 "bounds check after truncation of boundary-crossing range (2)",
6854 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6855 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6857 BPF_LD_MAP_FD(BPF_REG_1, 0),
6858 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6859 BPF_FUNC_map_lookup_elem),
6860 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6861 /* r1 = [0x00, 0xff] */
6862 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6864 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6866 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6867 * [0x0000'0000, 0x0000'007f]
6868 * difference to previous test: truncation via MOV32
6871 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6872 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6873 /* r1 = [0x00, 0xff] or
6874 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6876 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6878 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6880 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6881 /* no-op or OOB pointer computation */
6882 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6883 /* potentially OOB access */
6884 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6886 BPF_MOV64_IMM(BPF_REG_0, 0),
6889 .fixup_map1 = { 3 },
6890 /* not actually fully unbounded, but the bound is very high */
6891 .errstr = "R0 unbounded memory access",
6895 "bounds check after wrapping 32-bit addition",
6897 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6900 BPF_LD_MAP_FD(BPF_REG_1, 0),
6901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6902 BPF_FUNC_map_lookup_elem),
6903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6904 /* r1 = 0x7fff'ffff */
6905 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
6906 /* r1 = 0xffff'fffe */
6907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6909 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
6911 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6912 /* access at offset 0 */
6913 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6915 BPF_MOV64_IMM(BPF_REG_0, 0),
6918 .fixup_map1 = { 3 },
6922 "bounds check after shift with oversized count operand",
6924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6927 BPF_LD_MAP_FD(BPF_REG_1, 0),
6928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6929 BPF_FUNC_map_lookup_elem),
6930 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6931 BPF_MOV64_IMM(BPF_REG_2, 32),
6932 BPF_MOV64_IMM(BPF_REG_1, 1),
6933 /* r1 = (u32)1 << (u32)32 = ? */
6934 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
6935 /* r1 = [0x0000, 0xffff] */
6936 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
6937 /* computes unknown pointer, potentially OOB */
6938 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6939 /* potentially OOB access */
6940 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6942 BPF_MOV64_IMM(BPF_REG_0, 0),
6945 .fixup_map1 = { 3 },
6946 .errstr = "R0 max value is outside of the array range",
6950 "bounds check after right shift of maybe-negative number",
6952 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6955 BPF_LD_MAP_FD(BPF_REG_1, 0),
6956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6957 BPF_FUNC_map_lookup_elem),
6958 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6959 /* r1 = [0x00, 0xff] */
6960 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6961 /* r1 = [-0x01, 0xfe] */
6962 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
6963 /* r1 = 0 or 0xff'ffff'ffff'ffff */
6964 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6965 /* r1 = 0 or 0xffff'ffff'ffff */
6966 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6967 /* computes unknown pointer, potentially OOB */
6968 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6969 /* potentially OOB access */
6970 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6972 BPF_MOV64_IMM(BPF_REG_0, 0),
6975 .fixup_map1 = { 3 },
6976 .errstr = "R0 unbounded memory access",
6980 "bounds check map access with off+size signed 32bit overflow. test1",
6982 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6985 BPF_LD_MAP_FD(BPF_REG_1, 0),
6986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6987 BPF_FUNC_map_lookup_elem),
6988 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
6991 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
6995 .fixup_map1 = { 3 },
6996 .errstr = "map_value pointer and 2147483646",
7000 "bounds check map access with off+size signed 32bit overflow. test2",
7002 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7003 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7005 BPF_LD_MAP_FD(BPF_REG_1, 0),
7006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7007 BPF_FUNC_map_lookup_elem),
7008 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7012 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7013 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7017 .fixup_map1 = { 3 },
7018 .errstr = "pointer offset 1073741822",
7019 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7023 "bounds check map access with off+size signed 32bit overflow. test3",
7025 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7026 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7028 BPF_LD_MAP_FD(BPF_REG_1, 0),
7029 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7030 BPF_FUNC_map_lookup_elem),
7031 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7033 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7034 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7035 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7039 .fixup_map1 = { 3 },
7040 .errstr = "pointer offset -1073741822",
7041 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7045 "bounds check map access with off+size signed 32bit overflow. test4",
7047 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7050 BPF_LD_MAP_FD(BPF_REG_1, 0),
7051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7052 BPF_FUNC_map_lookup_elem),
7053 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7055 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7056 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7057 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7058 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7062 .fixup_map1 = { 3 },
7063 .errstr = "map_value pointer and 1000000000000",
7067 "pointer/scalar confusion in state equality check (way 1)",
7069 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7070 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7072 BPF_LD_MAP_FD(BPF_REG_1, 0),
7073 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7074 BPF_FUNC_map_lookup_elem),
7075 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7076 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7078 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7082 .fixup_map1 = { 3 },
7084 .result_unpriv = REJECT,
7085 .errstr_unpriv = "R0 leaks addr as return value"
7088 "pointer/scalar confusion in state equality check (way 2)",
7090 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7091 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7093 BPF_LD_MAP_FD(BPF_REG_1, 0),
7094 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7095 BPF_FUNC_map_lookup_elem),
7096 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7099 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7102 .fixup_map1 = { 3 },
7104 .result_unpriv = REJECT,
7105 .errstr_unpriv = "R0 leaks addr as return value"
7108 "variable-offset ctx access",
7110 /* Get an unknown value */
7111 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7112 /* Make it small and 4-byte aligned */
7113 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7114 /* add it to skb. We now have either &skb->len or
7115 * &skb->pkt_type, but we don't know which
7117 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7118 /* dereference it */
7119 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7122 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7124 .prog_type = BPF_PROG_TYPE_LWT_IN,
7127 "variable-offset stack access",
7129 /* Fill the top 8 bytes of the stack */
7130 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7131 /* Get an unknown value */
7132 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7133 /* Make it small and 4-byte aligned */
7134 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7135 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7136 /* add it to fp. We now have either fp-4 or fp-8, but
7137 * we don't know which
7139 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7140 /* dereference it */
7141 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7144 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7146 .prog_type = BPF_PROG_TYPE_LWT_IN,
7149 "indirect variable-offset stack access",
7151 /* Fill the top 8 bytes of the stack */
7152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7153 /* Get an unknown value */
7154 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7155 /* Make it small and 4-byte aligned */
7156 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7157 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7158 /* add it to fp. We now have either fp-4 or fp-8, but
7159 * we don't know which
7161 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7162 /* dereference it indirectly */
7163 BPF_LD_MAP_FD(BPF_REG_1, 0),
7164 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7165 BPF_FUNC_map_lookup_elem),
7166 BPF_MOV64_IMM(BPF_REG_0, 0),
7169 .fixup_map1 = { 5 },
7170 .errstr = "variable stack read R2",
7172 .prog_type = BPF_PROG_TYPE_LWT_IN,
7175 "direct stack access with 32-bit wraparound. test1",
7177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7180 BPF_MOV32_IMM(BPF_REG_0, 0),
7181 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7184 .errstr = "fp pointer and 2147483647",
7188 "direct stack access with 32-bit wraparound. test2",
7190 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7193 BPF_MOV32_IMM(BPF_REG_0, 0),
7194 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7197 .errstr = "fp pointer and 1073741823",
7201 "direct stack access with 32-bit wraparound. test3",
7203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7206 BPF_MOV32_IMM(BPF_REG_0, 0),
7207 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7210 .errstr = "fp pointer offset 1073741822",
7211 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
7215 "liveness pruning and write screening",
7217 /* Get an unknown value */
7218 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7219 /* branch conditions teach us nothing about R2 */
7220 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7221 BPF_MOV64_IMM(BPF_REG_0, 0),
7222 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7223 BPF_MOV64_IMM(BPF_REG_0, 0),
7226 .errstr = "R0 !read_ok",
7228 .prog_type = BPF_PROG_TYPE_LWT_IN,
7231 "varlen_map_value_access pruning",
7233 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7234 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7236 BPF_LD_MAP_FD(BPF_REG_1, 0),
7237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7238 BPF_FUNC_map_lookup_elem),
7239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7240 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7241 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7242 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7243 BPF_MOV32_IMM(BPF_REG_1, 0),
7244 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7245 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7246 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7247 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7248 offsetof(struct test_val, foo)),
7251 .fixup_map2 = { 3 },
7252 .errstr_unpriv = "R0 leaks addr",
7253 .errstr = "R0 unbounded memory access",
7254 .result_unpriv = REJECT,
7256 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7259 "invalid 64-bit BPF_END",
7261 BPF_MOV32_IMM(BPF_REG_0, 0),
7263 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7264 .dst_reg = BPF_REG_0,
7271 .errstr = "BPF_END uses reserved fields",
7275 "arithmetic ops make PTR_TO_CTX unusable",
7277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
7278 offsetof(struct __sk_buff, data) -
7279 offsetof(struct __sk_buff, mark)),
7280 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7281 offsetof(struct __sk_buff, mark)),
7284 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
7286 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7289 "XDP pkt read, pkt_end mangling, bad access 1",
7291 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7292 offsetof(struct xdp_md, data)),
7293 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7294 offsetof(struct xdp_md, data_end)),
7295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
7298 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7299 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7300 BPF_MOV64_IMM(BPF_REG_0, 0),
7303 .errstr = "R1 offset is outside of the packet",
7305 .prog_type = BPF_PROG_TYPE_XDP,
7308 "XDP pkt read, pkt_end mangling, bad access 2",
7310 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7311 offsetof(struct xdp_md, data)),
7312 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7313 offsetof(struct xdp_md, data_end)),
7314 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7316 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
7317 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7318 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7319 BPF_MOV64_IMM(BPF_REG_0, 0),
7322 .errstr = "R1 offset is outside of the packet",
7324 .prog_type = BPF_PROG_TYPE_XDP,
7327 "XDP pkt read, pkt_data' > pkt_end, good access",
7329 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7330 offsetof(struct xdp_md, data)),
7331 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7332 offsetof(struct xdp_md, data_end)),
7333 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7335 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7336 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7337 BPF_MOV64_IMM(BPF_REG_0, 0),
7341 .prog_type = BPF_PROG_TYPE_XDP,
7344 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7346 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7347 offsetof(struct xdp_md, data)),
7348 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7349 offsetof(struct xdp_md, data_end)),
7350 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7352 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7353 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7354 BPF_MOV64_IMM(BPF_REG_0, 0),
7357 .errstr = "R1 offset is outside of the packet",
7359 .prog_type = BPF_PROG_TYPE_XDP,
7360 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7363 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7365 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7366 offsetof(struct xdp_md, data)),
7367 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7368 offsetof(struct xdp_md, data_end)),
7369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7371 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7372 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7373 BPF_MOV64_IMM(BPF_REG_0, 0),
7376 .errstr = "R1 offset is outside of the packet",
7378 .prog_type = BPF_PROG_TYPE_XDP,
7381 "XDP pkt read, pkt_end > pkt_data', good access",
7383 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7384 offsetof(struct xdp_md, data)),
7385 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7386 offsetof(struct xdp_md, data_end)),
7387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7389 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7390 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7391 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7392 BPF_MOV64_IMM(BPF_REG_0, 0),
7396 .prog_type = BPF_PROG_TYPE_XDP,
7397 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7400 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7402 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7403 offsetof(struct xdp_md, data)),
7404 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7405 offsetof(struct xdp_md, data_end)),
7406 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7408 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7409 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7410 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7411 BPF_MOV64_IMM(BPF_REG_0, 0),
7414 .errstr = "R1 offset is outside of the packet",
7416 .prog_type = BPF_PROG_TYPE_XDP,
7419 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7421 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7422 offsetof(struct xdp_md, data)),
7423 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7424 offsetof(struct xdp_md, data_end)),
7425 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7427 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7428 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7429 BPF_MOV64_IMM(BPF_REG_0, 0),
7432 .errstr = "R1 offset is outside of the packet",
7434 .prog_type = BPF_PROG_TYPE_XDP,
7437 "XDP pkt read, pkt_data' < pkt_end, good access",
7439 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7440 offsetof(struct xdp_md, data)),
7441 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7442 offsetof(struct xdp_md, data_end)),
7443 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7445 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7446 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7447 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7448 BPF_MOV64_IMM(BPF_REG_0, 0),
7452 .prog_type = BPF_PROG_TYPE_XDP,
7453 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7456 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7458 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7459 offsetof(struct xdp_md, data)),
7460 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7461 offsetof(struct xdp_md, data_end)),
7462 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7464 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7465 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7466 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7467 BPF_MOV64_IMM(BPF_REG_0, 0),
7470 .errstr = "R1 offset is outside of the packet",
7472 .prog_type = BPF_PROG_TYPE_XDP,
7475 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7477 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7478 offsetof(struct xdp_md, data)),
7479 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7480 offsetof(struct xdp_md, data_end)),
7481 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7483 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7484 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7485 BPF_MOV64_IMM(BPF_REG_0, 0),
7488 .errstr = "R1 offset is outside of the packet",
7490 .prog_type = BPF_PROG_TYPE_XDP,
7493 "XDP pkt read, pkt_end < pkt_data', good access",
7495 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7496 offsetof(struct xdp_md, data)),
7497 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7498 offsetof(struct xdp_md, data_end)),
7499 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7501 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7502 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7503 BPF_MOV64_IMM(BPF_REG_0, 0),
7507 .prog_type = BPF_PROG_TYPE_XDP,
7510 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7512 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7513 offsetof(struct xdp_md, data)),
7514 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7515 offsetof(struct xdp_md, data_end)),
7516 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7518 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7519 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7520 BPF_MOV64_IMM(BPF_REG_0, 0),
7523 .errstr = "R1 offset is outside of the packet",
7525 .prog_type = BPF_PROG_TYPE_XDP,
7526 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7529 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7531 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7532 offsetof(struct xdp_md, data)),
7533 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7534 offsetof(struct xdp_md, data_end)),
7535 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7537 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7538 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7539 BPF_MOV64_IMM(BPF_REG_0, 0),
7542 .errstr = "R1 offset is outside of the packet",
7544 .prog_type = BPF_PROG_TYPE_XDP,
7547 "XDP pkt read, pkt_data' >= pkt_end, good access",
7549 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7550 offsetof(struct xdp_md, data)),
7551 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7552 offsetof(struct xdp_md, data_end)),
7553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7555 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7556 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7557 BPF_MOV64_IMM(BPF_REG_0, 0),
7561 .prog_type = BPF_PROG_TYPE_XDP,
7562 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7565 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7567 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7568 offsetof(struct xdp_md, data)),
7569 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7570 offsetof(struct xdp_md, data_end)),
7571 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7572 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7573 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7574 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7575 BPF_MOV64_IMM(BPF_REG_0, 0),
7578 .errstr = "R1 offset is outside of the packet",
7580 .prog_type = BPF_PROG_TYPE_XDP,
7583 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7585 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7586 offsetof(struct xdp_md, data)),
7587 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7588 offsetof(struct xdp_md, data_end)),
7589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7591 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7593 BPF_MOV64_IMM(BPF_REG_0, 0),
7596 .errstr = "R1 offset is outside of the packet",
7598 .prog_type = BPF_PROG_TYPE_XDP,
7599 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7602 "XDP pkt read, pkt_end >= pkt_data', good access",
7604 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7605 offsetof(struct xdp_md, data)),
7606 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7607 offsetof(struct xdp_md, data_end)),
7608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7610 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7611 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7612 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7613 BPF_MOV64_IMM(BPF_REG_0, 0),
7617 .prog_type = BPF_PROG_TYPE_XDP,
7620 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7622 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7623 offsetof(struct xdp_md, data)),
7624 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7625 offsetof(struct xdp_md, data_end)),
7626 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7628 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7629 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7630 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7631 BPF_MOV64_IMM(BPF_REG_0, 0),
7634 .errstr = "R1 offset is outside of the packet",
7636 .prog_type = BPF_PROG_TYPE_XDP,
7637 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7640 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7642 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7643 offsetof(struct xdp_md, data)),
7644 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7645 offsetof(struct xdp_md, data_end)),
7646 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7648 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7649 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7650 BPF_MOV64_IMM(BPF_REG_0, 0),
7653 .errstr = "R1 offset is outside of the packet",
7655 .prog_type = BPF_PROG_TYPE_XDP,
7658 "XDP pkt read, pkt_data' <= pkt_end, good access",
7660 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7661 offsetof(struct xdp_md, data)),
7662 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7663 offsetof(struct xdp_md, data_end)),
7664 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7666 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7667 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7669 BPF_MOV64_IMM(BPF_REG_0, 0),
7673 .prog_type = BPF_PROG_TYPE_XDP,
7676 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7678 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7679 offsetof(struct xdp_md, data)),
7680 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7681 offsetof(struct xdp_md, data_end)),
7682 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7684 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7685 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7686 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7687 BPF_MOV64_IMM(BPF_REG_0, 0),
7690 .errstr = "R1 offset is outside of the packet",
7692 .prog_type = BPF_PROG_TYPE_XDP,
7693 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7696 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7698 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7699 offsetof(struct xdp_md, data)),
7700 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7701 offsetof(struct xdp_md, data_end)),
7702 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7704 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7705 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7706 BPF_MOV64_IMM(BPF_REG_0, 0),
7709 .errstr = "R1 offset is outside of the packet",
7711 .prog_type = BPF_PROG_TYPE_XDP,
7714 "XDP pkt read, pkt_end <= pkt_data', good access",
7716 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7717 offsetof(struct xdp_md, data)),
7718 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7719 offsetof(struct xdp_md, data_end)),
7720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7722 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7723 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7724 BPF_MOV64_IMM(BPF_REG_0, 0),
7728 .prog_type = BPF_PROG_TYPE_XDP,
7729 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7732 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7734 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7735 offsetof(struct xdp_md, data)),
7736 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7737 offsetof(struct xdp_md, data_end)),
7738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7740 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7741 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7742 BPF_MOV64_IMM(BPF_REG_0, 0),
7745 .errstr = "R1 offset is outside of the packet",
7747 .prog_type = BPF_PROG_TYPE_XDP,
7750 "check deducing bounds from const, 1",
7752 BPF_MOV64_IMM(BPF_REG_0, 1),
7753 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
7754 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7758 .errstr = "R0 tried to subtract pointer from scalar",
7761 "check deducing bounds from const, 2",
7763 BPF_MOV64_IMM(BPF_REG_0, 1),
7764 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
7766 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
7768 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7774 "check deducing bounds from const, 3",
7776 BPF_MOV64_IMM(BPF_REG_0, 0),
7777 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
7778 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7782 .errstr = "R0 tried to subtract pointer from scalar",
7785 "check deducing bounds from const, 4",
7787 BPF_MOV64_IMM(BPF_REG_0, 0),
7788 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
7790 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7792 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7798 "check deducing bounds from const, 5",
7800 BPF_MOV64_IMM(BPF_REG_0, 0),
7801 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7802 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7806 .errstr = "R0 tried to subtract pointer from scalar",
7809 "check deducing bounds from const, 6",
7811 BPF_MOV64_IMM(BPF_REG_0, 0),
7812 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7814 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7818 .errstr = "R0 tried to subtract pointer from scalar",
7821 "check deducing bounds from const, 7",
7823 BPF_MOV64_IMM(BPF_REG_0, ~0),
7824 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
7825 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7826 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7827 offsetof(struct __sk_buff, mark)),
7831 .errstr = "dereference of modified ctx ptr",
7834 "check deducing bounds from const, 8",
7836 BPF_MOV64_IMM(BPF_REG_0, ~0),
7837 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7838 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7839 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7840 offsetof(struct __sk_buff, mark)),
7844 .errstr = "dereference of modified ctx ptr",
7847 "check deducing bounds from const, 9",
7849 BPF_MOV64_IMM(BPF_REG_0, 0),
7850 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
7851 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7855 .errstr = "R0 tried to subtract pointer from scalar",
7858 "check deducing bounds from const, 10",
7860 BPF_MOV64_IMM(BPF_REG_0, 0),
7861 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
7862 /* Marks reg as unknown. */
7863 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
7864 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7868 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
7871 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7873 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7874 offsetof(struct xdp_md, data)),
7875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7876 offsetof(struct xdp_md, data_end)),
7877 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7879 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7880 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7881 BPF_MOV64_IMM(BPF_REG_0, 0),
7884 .errstr = "R1 offset is outside of the packet",
7886 .prog_type = BPF_PROG_TYPE_XDP,
7887 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7890 "xadd/w check unaligned stack",
7892 BPF_MOV64_IMM(BPF_REG_0, 1),
7893 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7894 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
7895 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
7899 .errstr = "misaligned stack access off",
7900 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7903 "xadd/w check unaligned map",
7905 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7906 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7908 BPF_LD_MAP_FD(BPF_REG_1, 0),
7909 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7910 BPF_FUNC_map_lookup_elem),
7911 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7913 BPF_MOV64_IMM(BPF_REG_1, 1),
7914 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
7915 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
7918 .fixup_map1 = { 3 },
7920 .errstr = "misaligned value access off",
7921 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7924 "xadd/w check unaligned pkt",
7926 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7927 offsetof(struct xdp_md, data)),
7928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7929 offsetof(struct xdp_md, data_end)),
7930 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7932 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
7933 BPF_MOV64_IMM(BPF_REG_0, 99),
7934 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
7935 BPF_MOV64_IMM(BPF_REG_0, 1),
7936 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
7937 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
7938 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
7939 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
7940 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
7944 .errstr = "BPF_XADD stores into R2 packet",
7945 .prog_type = BPF_PROG_TYPE_XDP,
7949 static int probe_filter_length(const struct bpf_insn *fp)
7953 for (len = MAX_INSNS - 1; len > 0; --len)
7954 if (fp[len].code != 0 || fp[len].imm != 0)
7959 static int create_map(uint32_t size_value, uint32_t max_elem)
7963 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
7964 size_value, max_elem, BPF_F_NO_PREALLOC);
7966 printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Create a BPF_MAP_TYPE_PROG_ARRAY map (int keys) used by tail-call tests.
 * Returns the map fd; prints an error on failure.
 */
7971 static int create_prog_array(void)
7975 	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
7978 		printf("Failed to create prog array '%s'!\n", strerror(errno));
/* Create an ARRAY_OF_MAPS outer map whose inner map template is a small
 * BPF array.  The inner fd is only needed at creation time and is closed
 * before returning.  Returns the outer map fd (or a negative error).
 */
7983 static int create_map_in_map(void)
7985 	int inner_map_fd, outer_map_fd;
7987 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
7989 	if (inner_map_fd < 0) {
7990 		printf("Failed to create array '%s'!\n", strerror(errno));
7991 		return inner_map_fd;
/* The inner map fd serves as the value-type template for the outer map. */
7994 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
7995 					     sizeof(int), inner_map_fd, 1, 0);
7996 	if (outer_map_fd < 0)
7997 		printf("Failed to create array of maps '%s'!\n",
/* Inner template fd no longer needed once the outer map exists. */
8000 	close(inner_map_fd);
8002 	return outer_map_fd;
/* Buffer receiving the kernel verifier's log output for each test load. */
8005 static char bpf_vlog[32768];
/* Patch the map-fd placeholders in @prog: for each fixup list in @test,
 * create the corresponding map once and write its fd into the immediate
 * field of every listed instruction index.  Created fds are stored in
 * map_fds[] so the caller can close them after the test.
 */
8007 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
8010 	int *fixup_map1 = test->fixup_map1;
8011 	int *fixup_map2 = test->fixup_map2;
8012 	int *fixup_prog = test->fixup_prog;
8013 	int *fixup_map_in_map = test->fixup_map_in_map;
8015 	/* Allocating HTs with 1 elem is fine here, since we only test
8016 	 * for verifier and not do a runtime lookup, so the only thing
8017 	 * that really matters is value size in this case.
8020 		map_fds[0] = create_map(sizeof(long long), 1);
8022 			prog[*fixup_map1].imm = map_fds[0];
8024 		} while (*fixup_map1);
/* Hash map with struct test_val values for value-access tests. */
8028 		map_fds[1] = create_map(sizeof(struct test_val), 1);
8030 			prog[*fixup_map2].imm = map_fds[1];
8032 		} while (*fixup_map2);
/* Prog array for tail-call tests. */
8036 		map_fds[2] = create_prog_array();
8038 			prog[*fixup_prog].imm = map_fds[2];
8040 		} while (*fixup_prog);
/* Map-in-map for ARRAY_OF_MAPS tests. */
8043 	if (*fixup_map_in_map) {
8044 		map_fds[3] = create_map_in_map();
8046 			prog[*fixup_map_in_map].imm = map_fds[3];
8048 		} while (*fixup_map_in_map);
/* Run one verifier test case: apply map-fd fixups, attempt to load the
 * program, and compare the outcome (accept/reject and, on reject, the
 * verifier log message) against the test's expectations.  @unpriv selects
 * the unprivileged expectations when the test defines them.  Updates
 * *passes / *errors counters.
 */
8052 static void do_test_single(struct bpf_test *test, bool unpriv,
8053 			   int *passes, int *errors)
8055 	int fd_prog, expected_ret, reject_from_alignment;
8056 	struct bpf_insn *prog = test->insns;
8057 	int prog_len = probe_filter_length(prog);
8058 	int prog_type = test->prog_type;
8059 	int map_fds[MAX_NR_MAPS];
8060 	const char *expected_err;
8063 	for (i = 0; i < MAX_NR_MAPS; i++)
8066 	do_test_fixup(test, prog, map_fds);
/* Default to SOCKET_FILTER when the test does not set a prog type. */
8068 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
8069 				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
8070 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
/* Unpriv expectations override the default ones only when provided. */
8072 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
8073 		       test->result_unpriv : test->result;
8074 	expected_err = unpriv && test->errstr_unpriv ?
8075 		       test->errstr_unpriv : test->errstr;
/* A rejection caused purely by alignment is tolerated on architectures
 * without efficient unaligned access (flagged per test).
 */
8077 	reject_from_alignment = fd_prog < 0 &&
8078 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
8079 				strstr(bpf_vlog, "misaligned");
8080 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
8081 	if (reject_from_alignment) {
8082 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
8087 	if (expected_ret == ACCEPT) {
8088 		if (fd_prog < 0 && !reject_from_alignment) {
8089 			printf("FAIL\nFailed to load prog '%s'!\n",
8095 			printf("FAIL\nUnexpected success to load!\n");
/* On expected rejection, the verifier log must contain expected_err. */
8098 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
8099 			printf("FAIL\nUnexpected error message!\n");
8105 	printf("OK%s\n", reject_from_alignment ?
8106 	       " (NOTE: reject due to unknown alignment)" : "");
/* Close all map fds created by do_test_fixup(). */
8109 	for (i = 0; i < MAX_NR_MAPS; i++)
8115 	printf("%s", bpf_vlog);
/* Return true when the current process has CAP_SYS_ADMIN effective,
 * using libcap.  Used to decide whether to run privileged test variants.
 */
8119 static bool is_admin(void)
8122 	cap_flag_value_t sysadmin = CAP_CLEAR;
8123 	const cap_value_t cap_val = CAP_SYS_ADMIN;
8125 #ifdef CAP_IS_SUPPORTED
/* NOTE(review): probes CAP_SETFCAP support (not CAP_SYS_ADMIN) and the
 * perror label says "cap_get_flag" — presumably just a feature probe for
 * libcap; confirm against upstream intent.
 */
8126 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
8127 		perror("cap_get_flag");
8131 	caps = cap_get_proc();
8133 		perror("cap_get_proc");
8136 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
8137 		perror("cap_get_flag");
8140 	return (sysadmin == CAP_SET);
/* Raise (@admin == true) or drop (@admin == false) the effective
 * CAP_SYS_ADMIN capability of the current process, so tests can be run
 * both privileged and unprivileged from one root invocation.
 */
8143 static int set_admin(bool admin)
8146 	const cap_value_t cap_val = CAP_SYS_ADMIN;
8149 	caps = cap_get_proc();
8151 		perror("cap_get_proc");
8154 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
8155 				admin ? CAP_SET : CAP_CLEAR)) {
8156 		perror("cap_set_flag");
/* Commit the modified capability set to the process. */
8159 	if (cap_set_proc(caps)) {
8160 		perror("cap_set_proc");
/* Run tests[from..to) and print a summary.  When running as root
 * (@unpriv == false), tests without a specific prog type are additionally
 * exercised with privileges dropped.  Returns EXIT_FAILURE if any test
 * failed, EXIT_SUCCESS otherwise.
 */
8170 static int do_test(bool unpriv, unsigned int from, unsigned int to)
8172 	int i, passes = 0, errors = 0;
8174 	for (i = from; i < to; i++) {
8175 		struct bpf_test *test = &tests[i];
8177 		/* Program types that are not supported by non-root we
/* "/u" marks the unprivileged run of a test. */
8180 		if (!test->prog_type) {
8183 			printf("#%d/u %s ", i, test->descr);
8184 			do_test_single(test, true, &passes, &errors);
/* "/p" marks the privileged run. */
8190 		printf("#%d/p %s ", i, test->descr);
8191 		do_test_single(test, false, &passes, &errors);
8195 	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
8196 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
/* Entry point.  Optional arguments select a test range ("<from> <to>")
 * or a single test index.  RLIMIT_MEMLOCK is bumped to infinity for
 * privileged runs, or to 1 MiB for unprivileged runs (enough for the
 * small test maps).
 */
8199 int main(int argc, char **argv)
8201 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
8202 	struct rlimit rlim = { 1 << 20, 1 << 20 };
8203 	unsigned int from = 0, to = ARRAY_SIZE(tests);
8204 	bool unpriv = !is_admin();
/* Two trailing args: lower and upper bound of the test range. */
8207 		unsigned int l = atoi(argv[argc - 2]);
8208 		unsigned int u = atoi(argv[argc - 1]);
8210 		if (l < to && u < to) {
8214 	} else if (argc == 2) {
/* One trailing arg: run a single test index. */
8215 		unsigned int t = atoi(argv[argc - 1]);
8223 	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
8224 	return do_test(unpriv, from, to);