/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}
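
	/*
	 * Save-area layout note: each non-volatile register r backed up above
	 * sits at -(8*(32-r)) from the entry stack pointer (r31 at SP-8, r30
	 * at SP-16, and so on); the stdu then pulls the stack down by a
	 * further BPF_PPC_STACKFRAME bytes for the frame itself.
	 */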

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */
	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
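
/*
 * Illustrative note (not in the original source): CHOOSE_LOAD_FUNC selects a
 * helper variant at compile time from the sign of the constant offset K,
 * e.g. for word loads:
 *
 *   K = 14               -> sk_load_word_positive_offset (plain packet data)
 *   K = SKF_NET_OFF + 12 -> sk_load_word_negative_offset (SKF_LL_OFF <= K < 0)
 *   K < SKF_LL_OFF       -> sk_load_word (generic helper, decided at run time)
 *
 * Indirect loads (X + K) cannot be classified statically, so they always go
 * through the generic helpers (see common_load_ind below).
 */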

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
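		/*
		 * Note on the IMM_L/IMM_HA pairs used for 32-bit constants
		 * here and below: addi/addis sign-extend their 16-bit
		 * immediate, so the low half is applied with IMM_L(K) and the
		 * high half with IMM_HA(K) ("high-adjusted": K's upper 16
		 * bits plus one when bit 15 is set), making the two steps sum
		 * to exactly K.
		 */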
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
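			/* X == 0 was handled above by returning 0; otherwise
			 * A %= X is computed as A - (A / X) * X.
			 */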
			PPC_DIVWU(r_scratch1, r_A, r_X);
			PPC_MUL(r_scratch1, r_X, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

		/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
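			/* vlan_tci carries the tag with VLAN_TAG_PRESENT (bit
			 * 12) used as a presence flag: mask the flag off for
			 * VLAN_TAG, or isolate it and shift it down to 0/1 for
			 * VLAN_TAG_PRESENT.
			 */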
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_CPU:
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
			break;

		/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

		/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;
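		/*
		 * Classic BPF jump offsets are relative to the *next*
		 * instruction, hence addrs[i + 1 + K] here (and the jt/jf
		 * indexing below) when translating targets into code offsets.
		 */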
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
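			/* For example, for "jgt #7 jt 0 jf 3" the true path is
			 * the fallthrough, so a single branch on the inverted
			 * condition (true_cond ^ COND_CMP_TRUE) goes straight
			 * to the false target at addrs[i + 1 + 3].
			 */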
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * shrink more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
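
	/*
	 * Note: image is NULL on this sizing pass, so the emit macros only
	 * count instructions (advancing cgctx.idx) rather than storing them;
	 * idx * 4 below is therefore the worst-case code size in bytes.
	 */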

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
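		/*
		 * On ELFv1 ppc64 a function pointer refers to a two-dword
		 * descriptor (entry address, TOC pointer) rather than to the
		 * code itself, so the image begins with this descriptor and
		 * code_base points just past it.
		 */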
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
	kfree(fp);
}