Diffstat (limited to 'arch/powerpc/net/bpf_jit_comp64.c')
-rw-r--r-- | arch/powerpc/net/bpf_jit_comp64.c | 401
1 file changed, 295 insertions, 106 deletions
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 025524378443..1fe37128c876 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -25,18 +25,18 @@
  * with our redzone usage.
  *
  *              [       prev sp         ] <-------------
- *              [   nv gpr save area    ] 5*8           |
+ *              [   nv gpr save area    ] 6*8           |
  *              [    tail_call_cnt      ] 8             |
- *              [    local_tmp_var      ] 16            |
+ *              [    local_tmp_var      ] 24            |
  * fp (r31) --> [   ebpf stack space    ] upto 512      |
  *              [     frame header      ] 32/112        |
  * sp (r1) ---> [    stack pointer      ] --------------
  */

 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE      (5*8)
+#define BPF_PPC_STACK_SAVE      (6*8)

 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS    24
+#define BPF_PPC_STACK_LOCALS    32

 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME      (STACK_FRAME_MIN_SIZE + \
                                  BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
@@ -44,6 +44,7 @@
 /* BPF register usage */
 #define TMP_REG_1       (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2       (MAX_BPF_JIT_REG + 1)
+#define ARENA_VM_START  (MAX_BPF_JIT_REG + 2)

 /* BPF to ppc register mappings */
 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
@@ -67,10 +68,12 @@ void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
        ctx->b2p[BPF_REG_AX] = _R12;
        ctx->b2p[TMP_REG_1] = _R9;
        ctx->b2p[TMP_REG_2] = _R10;
+       /* non volatile register for kern_vm_start address */
+       ctx->b2p[ARENA_VM_START] = _R26;
 }

-/* PPC NVR range -- update this if we ever use NVRs below r27 */
-#define BPF_PPC_NVR_MIN _R27
+/* PPC NVR range -- update this if we ever use NVRs below r26 */
+#define BPF_PPC_NVR_MIN _R26

 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 {
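Note: a quick arithmetic check of the resized frame, written as a standalone sketch rather than kernel code (the SKETCH_* names are made up; the 32/112 values are the ELFv2/ELFv1 frame header sizes quoted in the layout comment above). With the save area grown to 6*8 = 48 bytes to cover r26 and the locals grown to 32 bytes, the fixed part of the frame stays quadword aligned on both ABIs, as the BPF_PPC_STACKFRAME comment requires:

/* Illustrative sanity check only; constants mirror the patched macros. */
#define SKETCH_STACK_SAVE       (6 * 8) /* r26 plus the r27-r31 saves for BPF_REG_6..10 */
#define SKETCH_STACK_LOCALS     32      /* 24 bytes local_tmp_var + 8 bytes tail_call_cnt */

_Static_assert((32 + SKETCH_STACK_LOCALS + SKETCH_STACK_SAVE) % 16 == 0,
               "ELFv2: 32 + 32 + 48 = 112 stays quadword aligned");
_Static_assert((112 + SKETCH_STACK_LOCALS + SKETCH_STACK_SAVE) % 16 == 0,
               "ELFv1: 112 + 32 + 48 = 192 stays quadword aligned");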
@@ -89,9 +92,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *              [       prev sp         ] <-------------
  *              [         ...           ]               |
  * sp (r1) ---> [    stack pointer      ] --------------
- *              [   nv gpr save area    ] 5*8
+ *              [   nv gpr save area    ] 6*8
  *              [    tail_call_cnt      ] 8
- *              [    local_tmp_var      ] 16
+ *              [    local_tmp_var      ] 24
  *              [   unused red zone     ] 224
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -99,12 +102,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
        if (bpf_has_stack_frame(ctx))
                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
        else
-               return -(BPF_PPC_STACK_SAVE + 24);
+               return -(BPF_PPC_STACK_SAVE + 32);
 }

 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-       return bpf_jit_stack_local(ctx) + 16;
+       return bpf_jit_stack_local(ctx) + 24;
 }

 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -170,10 +173,17 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
                if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
                        EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

+       if (ctx->arena_vm_start)
+               EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
+                                bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
+
        /* Setup frame pointer to point to the bpf stack area */
        if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
                EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
                                  STACK_FRAME_MIN_SIZE + ctx->stack_size));
+
+       if (ctx->arena_vm_start)
+               PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
 }

 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -185,6 +195,10 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
                if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
                        EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

+       if (ctx->arena_vm_start)
+               EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
+                               bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
+
        /* Tear down our stack frame */
        if (bpf_has_stack_frame(ctx)) {
                EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
@@ -396,11 +410,11 @@ void bpf_stf_barrier(void);
 asm (
 "              .global bpf_stf_barrier         ;"
 "      bpf_stf_barrier:                        ;"
-"              std     21,-64(1)               ;"
-"              std     22,-56(1)               ;"
+"              std     21,-80(1)               ;"
+"              std     22,-72(1)               ;"
 "              sync                            ;"
-"              ld      21,-64(1)               ;"
-"              ld      22,-56(1)               ;"
+"              ld      21,-80(1)               ;"
+"              ld      22,-72(1)               ;"
 "              ori     31,31,0                 ;"
 "              .rept 14                        ;"
 "              b       1f                      ;"
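Note: when no stack frame is set up, the program runs out of the caller's red zone and the helpers above hand back fixed negative offsets from r1. A small model of the new offsets (MODEL_* and model_* are hypothetical names, not the kernel functions) shows why the bpf_stf_barrier() spill slots move from -64/-56 to -80/-72: the r21/r22 scratch stores must keep landing inside the 24-byte local_tmp_var window, now at r1-80 .. r1-57, rather than in tail_call_cnt (r1-56 .. r1-49) or the enlarged 48-byte nv gpr save area (r1-48 .. r1-1).

/* Hypothetical model of bpf_jit_stack_local()/bpf_jit_stack_tailcallcnt()
 * for the no-stack-frame (red zone) case, using the patched constants. */
#define MODEL_STACK_SAVE        (6 * 8)         /* 48: nv gprs at r1-48 .. r1-1 */
#define MODEL_STACK_LOCALS      32

static int model_stack_local(void)
{
        return -(MODEL_STACK_SAVE + MODEL_STACK_LOCALS);        /* -80: local_tmp_var */
}

static int model_stack_tailcallcnt(void)
{
        return model_stack_local() + 24;                        /* -56: tail_call_cnt */
}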
@@ -409,6 +423,141 @@ asm (
 "              blr                             ;"
 );

+static int bpf_jit_emit_atomic_ops(u32 *image, struct codegen_context *ctx,
+                                  const struct bpf_insn *insn, u32 *jmp_off,
+                                  u32 *tmp_idx, u32 *addrp)
+{
+       u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+       u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+       u32 size = BPF_SIZE(insn->code);
+       u32 src_reg = bpf_to_ppc(insn->src_reg);
+       u32 dst_reg = bpf_to_ppc(insn->dst_reg);
+       s32 imm = insn->imm;
+
+       u32 save_reg = tmp2_reg;
+       u32 ret_reg = src_reg;
+       u32 fixup_idx;
+
+       /* Get offset into TMP_REG_1 */
+       EMIT(PPC_RAW_LI(tmp1_reg, insn->off));
+       /*
+        * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+        * before and after the operation.
+        *
+        * This is a requirement in the Linux Kernel Memory Model.
+        * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+        */
+       if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+               EMIT(PPC_RAW_SYNC());
+
+       *tmp_idx = ctx->idx;
+
+       /* load value from memory into TMP_REG_2 */
+       if (size == BPF_DW)
+               EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+       else
+               EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+       /* Save old value in _R0 */
+       if (imm & BPF_FETCH)
+               EMIT(PPC_RAW_MR(_R0, tmp2_reg));
+
+       switch (imm) {
+       case BPF_ADD:
+       case BPF_ADD | BPF_FETCH:
+               EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
+               break;
+       case BPF_AND:
+       case BPF_AND | BPF_FETCH:
+               EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
+               break;
+       case BPF_OR:
+       case BPF_OR | BPF_FETCH:
+               EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
+               break;
+       case BPF_XOR:
+       case BPF_XOR | BPF_FETCH:
+               EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
+               break;
+       case BPF_CMPXCHG:
+               /*
+                * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+                * in src_reg for other cases.
+                */
+               ret_reg = bpf_to_ppc(BPF_REG_0);
+
+               /* Compare with old value in BPF_R0 */
+               if (size == BPF_DW)
+                       EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+               else
+                       EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+               /* Don't set if different from old value */
+               PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+               fallthrough;
+       case BPF_XCHG:
+               save_reg = src_reg;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* store new value */
+       if (size == BPF_DW)
+               EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
+       else
+               EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
+       /* we're done if this succeeded */
+       PPC_BCC_SHORT(COND_NE, *tmp_idx * 4);
+       fixup_idx = ctx->idx;
+
+       if (imm & BPF_FETCH) {
+               /* Emit 'sync' to enforce full ordering */
+               if (IS_ENABLED(CONFIG_SMP))
+                       EMIT(PPC_RAW_SYNC());
+               EMIT(PPC_RAW_MR(ret_reg, _R0));
+               /*
+                * Skip unnecessary zero-extension for 32-bit cmpxchg.
+                * For context, see commit 39491867ace5.
+                */
+               if (size != BPF_DW && imm == BPF_CMPXCHG &&
+                   insn_is_zext(insn + 1))
+                       *addrp = ctx->idx * 4;
+       }
+
+       *jmp_off = (fixup_idx - *tmp_idx) * 4;
+
+       return 0;
+}
+
+static int bpf_jit_emit_probe_mem_store(struct codegen_context *ctx, u32 src_reg, s16 off,
+                                       u32 code, u32 *image)
+{
+       u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+       u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+
+       switch (BPF_SIZE(code)) {
+       case BPF_B:
+               EMIT(PPC_RAW_STB(src_reg, tmp1_reg, off));
+               break;
+       case BPF_H:
+               EMIT(PPC_RAW_STH(src_reg, tmp1_reg, off));
+               break;
+       case BPF_W:
+               EMIT(PPC_RAW_STW(src_reg, tmp1_reg, off));
+               break;
+       case BPF_DW:
+               if (off % 4) {
+                       EMIT(PPC_RAW_LI(tmp2_reg, off));
+                       EMIT(PPC_RAW_STDX(src_reg, tmp1_reg, tmp2_reg));
+               } else {
+                       EMIT(PPC_RAW_STD(src_reg, tmp1_reg, off));
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
 {
        u32 code = insn.code;
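Note: for BPF_ADD | BPF_FETCH on a 64-bit value, the helper above emits a sync / ldarx / add / stdcx. / bne- / sync sequence (the sync pair only on CONFIG_SMP). A hand-written equivalent, as a sketch of what the generated code computes rather than the JIT's actual output (the function name is made up, and it only builds on powerpc64):

#include <stdint.h>

static inline uint64_t fetch_add64_sketch(uint64_t *p, uint64_t v)
{
        uint64_t old, tmp;

        asm volatile(
"       sync\n"                 /* full barrier before, required for BPF_FETCH  */
"1:     ldarx   %0,0,%3\n"      /* old = *p, open a reservation                 */
"       add     %1,%0,%4\n"     /* tmp = old + v                                */
"       stdcx.  %1,0,%3\n"      /* conditional store, CR0.EQ clear on failure   */
"       bne-    1b\n"           /* lost the reservation: retry from the ldarx   */
"       sync\n"                 /* full barrier after                           */
        : "=&r" (old), "=&r" (tmp), "+m" (*p)
        : "r" (p), "r" (v)
        : "cc", "memory");

        return old;
}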
@@ -494,7 +643,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
                u32 size = BPF_SIZE(code);
                u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
                u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
-               u32 save_reg, ret_reg;
                s16 off = insn[i].off;
                s32 imm = insn[i].imm;
                bool func_addr_fixed;
@@ -502,6 +650,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
                u64 imm64;
                u32 true_cond;
                u32 tmp_idx;
+               u32 jmp_off;

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
@@ -768,6 +917,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
                 */
                case BPF_ALU | BPF_MOV | BPF_X:  /* (u32) dst = src */
                case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+
+                       if (insn_is_cast_user(&insn[i])) {
+                               EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
+                               PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
+                               PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
+                               EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
+                               EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
+                               break;
+                       }
+
                        if (imm == 1) {
                                /* special mov32 for zext */
                                EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
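Note: the insn_is_cast_user() block above handles the arena "cast to user address" move. A C mirror of the emitted sequence (a hypothetical helper, not kernel code) makes the branch target explicit: the beq skips only the or, so the trailing mr still runs, a NULL arena pointer stays NULL, and any other value keeps its low 32 bits and gains the upper 32 bits of ctx->user_vm_start.

#include <stdint.h>

static inline uint64_t cast_user_sketch(uint64_t src, uint64_t user_vm_start)
{
        uint64_t low = src & 0xffffffffULL;     /* rldicl. tmp1,src,0,32 - also sets CR0 */

        if (low == 0)                           /* beq: skip the or, mr then copies 0    */
                return 0;

        return (user_vm_start & 0xffffffff00000000ULL) | low;  /* li64 + or + mr */
}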
@@ -960,6 +1119,76 @@ emit_clear:
                        }
                        break;

+               case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+               case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+               case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+               case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+
+                       EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+
+                       ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
+                       if (ret)
+                               return ret;
+
+                       ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+                                                   ctx->idx - 1, 4, -1, code);
+                       if (ret)
+                               return ret;
+
+                       break;
+
+               case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+               case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+               case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+               case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+
+                       EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+
+                       if (BPF_SIZE(code) == BPF_W || BPF_SIZE(code) == BPF_DW) {
+                               PPC_LI32(tmp2_reg, imm);
+                               src_reg = tmp2_reg;
+                       } else {
+                               EMIT(PPC_RAW_LI(tmp2_reg, imm));
+                               src_reg = tmp2_reg;
+                       }
+
+                       ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
+                       if (ret)
+                               return ret;
+
+                       ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+                                                   ctx->idx - 1, 4, -1, code);
+                       if (ret)
+                               return ret;
+
+                       break;
+
+               /*
+                * BPF_STX PROBE_ATOMIC (arena atomic ops)
+                */
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+                       ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
+                                                     &jmp_off, &tmp_idx, &addrs[i + 1]);
+                       if (ret) {
+                               if (ret == -EOPNOTSUPP) {
+                                       pr_err_ratelimited(
+                                               "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                               code, i);
+                               }
+                               return ret;
+                       }
+                       /* LDARX/LWARX should land here on exception. */
+                       ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+                                                   tmp_idx, jmp_off, dst_reg, code);
+                       if (ret)
+                               return ret;
+
+                       /* Retrieve the dst_reg */
+                       EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+                       break;
+
                /*
                 * BPF_STX ATOMIC (atomic ops)
                 */
@@ -982,93 +1211,15 @@ emit_clear:
                                return -EOPNOTSUPP;
                        }

-                       save_reg = tmp2_reg;
-                       ret_reg = src_reg;
-
-                       /* Get offset into TMP_REG_1 */
-                       EMIT(PPC_RAW_LI(tmp1_reg, off));
-                       /*
-                        * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
-                        * before and after the operation.
-                        *
-                        * This is a requirement in the Linux Kernel Memory Model.
-                        * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
-                        */
-                       if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
-                               EMIT(PPC_RAW_SYNC());
-                       tmp_idx = ctx->idx * 4;
-                       /* load value from memory into TMP_REG_2 */
-                       if (size == BPF_DW)
-                               EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
-                       else
-                               EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
-
-                       /* Save old value in _R0 */
-                       if (imm & BPF_FETCH)
-                               EMIT(PPC_RAW_MR(_R0, tmp2_reg));
-
-                       switch (imm) {
-                       case BPF_ADD:
-                       case BPF_ADD | BPF_FETCH:
-                               EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
-                               break;
-                       case BPF_AND:
-                       case BPF_AND | BPF_FETCH:
-                               EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
-                               break;
-                       case BPF_OR:
-                       case BPF_OR | BPF_FETCH:
-                               EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
-                               break;
-                       case BPF_XOR:
-                       case BPF_XOR | BPF_FETCH:
-                               EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
-                               break;
-                       case BPF_CMPXCHG:
-                               /*
-                                * Return old value in BPF_REG_0 for BPF_CMPXCHG &
-                                * in src_reg for other cases.
-                                */
-                               ret_reg = bpf_to_ppc(BPF_REG_0);
-
-                               /* Compare with old value in BPF_R0 */
-                               if (size == BPF_DW)
-                                       EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
-                               else
-                                       EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
-                               /* Don't set if different from old value */
-                               PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
-                               fallthrough;
-                       case BPF_XCHG:
-                               save_reg = src_reg;
-                               break;
-                       default:
-                               pr_err_ratelimited(
-                                       "eBPF filter atomic op code %02x (@%d) unsupported\n",
-                                       code, i);
-                               return -EOPNOTSUPP;
-                       }
-
-                       /* store new value */
-                       if (size == BPF_DW)
-                               EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
-                       else
-                               EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
-                       /* we're done if this succeeded */
-                       PPC_BCC_SHORT(COND_NE, tmp_idx);
-
-                       if (imm & BPF_FETCH) {
-                               /* Emit 'sync' to enforce full ordering */
-                               if (IS_ENABLED(CONFIG_SMP))
-                                       EMIT(PPC_RAW_SYNC());
-                               EMIT(PPC_RAW_MR(ret_reg, _R0));
-                               /*
-                                * Skip unnecessary zero-extension for 32-bit cmpxchg.
-                                * For context, see commit 39491867ace5.
-                                */
-                               if (size != BPF_DW && imm == BPF_CMPXCHG &&
-                                   insn_is_zext(&insn[i + 1]))
-                                       addrs[++i] = ctx->idx * 4;
+                       ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
+                                                     &jmp_off, &tmp_idx, &addrs[i + 1]);
+                       if (ret) {
+                               if (ret == -EOPNOTSUPP) {
+                                       pr_err_ratelimited(
+                                               "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                               code, i);
+                               }
+                               return ret;
                        }
                        break;

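Note: the PROBE_ATOMIC cases temporarily rebase dst_reg onto the arena's kernel mapping, run the same larx/stcx. sequence as a normal BPF_ATOMIC, and then subtract the base back out; the extable entry is attached to the larx at tmp_idx with a fixup jmp_off bytes ahead, i.e. just past the retry loop. A minimal C model of that flow (hypothetical names, with __atomic_fetch_add standing in for the emitted loop):

#include <stdint.h>

static uint64_t arena_fetch_add64_model(uint64_t *dst_reg, uint64_t src,
                                        uint64_t kern_vm_start)
{
        uint64_t old;

        *dst_reg += kern_vm_start;              /* add  dst, dst, r26 */
        old = __atomic_fetch_add((uint64_t *)(uintptr_t)*dst_reg, src,
                                 __ATOMIC_SEQ_CST);     /* larx/stcx. loop; a fault
                                                           skips it via the extable */
        *dst_reg -= kern_vm_start;              /* sub  dst, dst, r26 */

        return old;
}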
@@ -1112,9 +1263,10 @@ emit_clear:
                         * Check if 'off' is word aligned for BPF_DW, because
                         * we might generate two instructions.
                         */
-                       if ((BPF_SIZE(code) == BPF_DW ||
-                           (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
-                           (off & 3))
+                       if ((BPF_SIZE(code) == BPF_DW && (off & 3)) ||
+                           (BPF_SIZE(code) == BPF_B &&
+                            BPF_MODE(code) == BPF_PROBE_MEMSX) ||
+                           (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_MEMSX))
                                PPC_JMP((ctx->idx + 3) * 4);
                        else
                                PPC_JMP((ctx->idx + 2) * 4);
@@ -1160,12 +1312,49 @@ emit_clear:

                        if (BPF_MODE(code) == BPF_PROBE_MEM) {
                                ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
-                                                           ctx->idx - 1, 4, dst_reg);
+                                                           ctx->idx - 1, 4, dst_reg, code);
                                if (ret)
                                        return ret;
                        }
                        break;

+               /* dst = *(u64 *)(ul) (src + ARENA_VM_START + off) */
+               case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+               case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+               case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+               case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+
+                       EMIT(PPC_RAW_ADD(tmp1_reg, src_reg, bpf_to_ppc(ARENA_VM_START)));
+
+                       switch (size) {
+                       case BPF_B:
+                               EMIT(PPC_RAW_LBZ(dst_reg, tmp1_reg, off));
+                               break;
+                       case BPF_H:
+                               EMIT(PPC_RAW_LHZ(dst_reg, tmp1_reg, off));
+                               break;
+                       case BPF_W:
+                               EMIT(PPC_RAW_LWZ(dst_reg, tmp1_reg, off));
+                               break;
+                       case BPF_DW:
+                               if (off % 4) {
+                                       EMIT(PPC_RAW_LI(tmp2_reg, off));
+                                       EMIT(PPC_RAW_LDX(dst_reg, tmp1_reg, tmp2_reg));
+                               } else {
+                                       EMIT(PPC_RAW_LD(dst_reg, tmp1_reg, off));
+                               }
+                               break;
+                       }
+
+                       if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+                               addrs[++i] = ctx->idx * 4;
+
+                       ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+                                                   ctx->idx - 1, 4, dst_reg, code);
+                       if (ret)
+                               return ret;
+                       break;
+
                /*
                 * Doubleword load
                 * 16 byte instruction that uses two 'struct bpf_insn'
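Note: in the BPF_DW leg of the PROBE_MEM32 load above (and in the matching store helper earlier), the off % 4 check exists because ppc64 ld/std are DS-form instructions whose displacement field drops its two low bits, so only 4-byte-aligned offsets can be encoded directly; anything else is materialized into a temporary register and accessed through the indexed ldx/stdx form. A standalone check capturing that constraint (illustrative helper, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* ppc64 ld/std take a DS-form displacement: a multiple of 4 that fits in
 * 16 bits. A BPF s16 offset always fits the field, so alignment is the only
 * question, which is exactly what the off % 4 test above decides. */
static bool ds_form_displacement_ok(int16_t off)
{
        return (off % 4) == 0;
}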