| author | Huacai Chen <chenhuacai@loongson.cn> | 2025-08-02 11:07:06 +0800 |
|---|---|---|
| committer | Huacai Chen <chenhuacai@loongson.cn> | 2025-08-02 11:07:06 +0800 |
| commit | 0a91336e287ca2557fead5221d2c79e0effd034e | |
| tree | 3acfb76800d995a06e2a6932d683e67ecb1c5590 /arch | |
| parent | 038d61fd642278bab63ee8ef722c50d10ab01e8f | |
| parent | cd7c97f4584a93578f87ea6ff427f74e9a943cdf | |
Merge tag 'bpf-next-6.17' into loongarch-next
The LoongArch architecture changes for 6.17 include many BPF features,
such as trampoline support, so merge 'bpf-next-6.17' to create a base
on which BPF works well.
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/arm64/net/bpf_jit.h | 5 |
| -rw-r--r-- | arch/arm64/net/bpf_jit_comp.c | 167 |
| -rw-r--r-- | arch/powerpc/net/bpf_jit_comp64.c | 79 |
| -rw-r--r-- | arch/s390/net/bpf_jit.h | 55 |
| -rw-r--r-- | arch/s390/net/bpf_jit_comp.c | 113 |
| -rw-r--r-- | arch/x86/net/bpf_jit_comp.c | 10 |
6 files changed, 272 insertions, 157 deletions
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index a3b0e693a125..bbea4f36f9f2 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -325,4 +325,9 @@
 #define A64_MRS_SP_EL0(Rt) \
 	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)
 
+/* Barriers */
+#define A64_SB		aarch64_insn_get_sb_value()
+#define A64_DSB_NSH	(aarch64_insn_get_dsb_base_value() | 0x7 << 8)
+#define A64_ISB		aarch64_insn_get_isb_value()
+
 #endif /* _BPF_JIT_H */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index da8b89dd2910..97dfd5432809 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -30,6 +30,7 @@
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define PRIVATE_SP (MAX_BPF_JIT_REG + 4)
 #define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do { \
@@ -68,6 +69,8 @@ static const int bpf2a64[] = {
 	[TCCNT_PTR] = A64_R(26),
 	/* temporary register for blinding constants */
 	[BPF_REG_AX] = A64_R(9),
+	/* callee saved register for private stack pointer */
+	[PRIVATE_SP] = A64_R(27),
 	/* callee saved register for kern_vm_start address */
 	[ARENA_VM_START] = A64_R(28),
 };
@@ -86,6 +89,7 @@ struct jit_ctx {
 	u64 user_vm_start;
 	u64 arena_vm_start;
 	bool fp_used;
+	bool priv_sp_used;
 	bool write;
 };
 
@@ -98,6 +102,10 @@ struct bpf_plt {
 #define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
 #define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
 
+/* Memory size/value to protect private stack overflow/underflow */
+#define PRIV_STACK_GUARD_SZ	16
+#define PRIV_STACK_GUARD_VAL	0xEB9F12345678eb9fULL
+
 static inline void emit(const u32 insn, struct jit_ctx *ctx)
 {
 	if (ctx->image != NULL && ctx->write)
@@ -387,8 +395,11 @@ static void find_used_callee_regs(struct jit_ctx *ctx)
 	if (reg_used & 8)
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
 
-	if (reg_used & 16)
+	if (reg_used & 16) {
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+		if (ctx->priv_sp_used)
+			ctx->used_callee_reg[i++] = bpf2a64[PRIVATE_SP];
+	}
 
 	if (ctx->arena_vm_start)
 		ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
@@ -412,6 +423,7 @@ static void push_callee_regs(struct jit_ctx *ctx)
 		emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
 		emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
 		emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
+		ctx->fp_used = true;
 	} else {
 		find_used_callee_regs(ctx);
 		for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {
@@ -461,6 +473,19 @@ static void pop_callee_regs(struct jit_ctx *ctx)
 	}
 }
 
+static void emit_percpu_ptr(const u8 dst_reg, void __percpu *ptr,
+			    struct jit_ctx *ctx)
+{
+	const u8 tmp = bpf2a64[TMP_REG_1];
+
+	emit_a64_mov_i64(dst_reg, (__force const u64)ptr, ctx);
+	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+	else
+		emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+	emit(A64_ADD(1, dst_reg, dst_reg, tmp), ctx);
+}
+
 #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
 #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
@@ -476,6 +501,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const bool is_main_prog = !bpf_is_subprog(prog);
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
+	void __percpu *priv_stack_ptr;
 	const int idx0 = ctx->idx;
 	int cur_offset;
 
@@ -551,15 +578,23 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 			emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
 	}
 
-	if (ctx->fp_used)
-		/* Set up BPF prog stack base register */
-		emit(A64_MOV(1, fp, A64_SP), ctx);
-
 	/* Stack must be multiples of 16B */
 	ctx->stack_size = round_up(prog->aux->stack_depth, 16);
 
+	if (ctx->fp_used) {
+		if (ctx->priv_sp_used) {
+			/* Set up private stack pointer */
+			priv_stack_ptr = prog->aux->priv_stack_ptr + PRIV_STACK_GUARD_SZ;
+			emit_percpu_ptr(priv_sp, priv_stack_ptr, ctx);
+			emit(A64_ADD_I(1, fp, priv_sp, ctx->stack_size), ctx);
+		} else {
+			/* Set up BPF prog stack base register */
+			emit(A64_MOV(1, fp, A64_SP), ctx);
+		}
+	}
+
 	/* Set up function call stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	if (ctx->arena_vm_start)
@@ -623,7 +658,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	emit(A64_STR64I(tcc, ptr, 0), ctx);
 
 	/* restore SP */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -991,7 +1026,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 	const u8 ptr = bpf2a64[TCCNT_PTR];
 
 	/* We're done with BPF stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -1120,6 +1155,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -1564,7 +1600,7 @@ emit_cond_jmp:
 			src = tmp2;
 		}
 		if (src == fp) {
-			src_adj = A64_SP;
+			src_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			src_adj = src;
@@ -1630,17 +1666,14 @@ emit_cond_jmp:
 			return ret;
 		break;
 
-	/* speculation barrier */
+	/* speculation barrier against v1 and v4 */
 	case BPF_ST | BPF_NOSPEC:
-		/*
-		 * Nothing required here.
-		 *
-		 * In case of arm64, we rely on the firmware mitigation of
-		 * Speculative Store Bypass as controlled via the ssbd kernel
-		 * parameter. Whenever the mitigation is enabled, it works
-		 * for all of the kernel code with no need to provide any
-		 * additional instructions.
-		 */
+		if (alternative_has_cap_likely(ARM64_HAS_SB)) {
+			emit(A64_SB, ctx);
+		} else {
+			emit(A64_DSB_NSH, ctx);
+			emit(A64_ISB, ctx);
+		}
 		break;
 
 	/* ST: *(size *)(dst + off) = imm */
@@ -1657,7 +1690,7 @@ emit_cond_jmp:
 			dst = tmp2;
 		}
 		if (dst == fp) {
-			dst_adj = A64_SP;
+			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			dst_adj = dst;
@@ -1719,7 +1752,7 @@ emit_cond_jmp:
 			dst = tmp2;
 		}
 		if (dst == fp) {
-			dst_adj = A64_SP;
+			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			dst_adj = dst;
@@ -1862,6 +1895,39 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		stack_ptr[0] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[1] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;
+	}
+}
+
+static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
+				   struct bpf_prog *prog)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[1] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx + 1] != PRIV_STACK_GUARD_VAL) {
+			pr_err("BPF private stack overflow/underflow detected for prog %sx\n",
+			       bpf_jit_get_prog_name(prog));
+			break;
+		}
+	}
+}
+
 struct arm64_jit_data {
 	struct bpf_binary_header *header;
 	u8 *ro_image;
@@ -1874,9 +1940,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	int image_size, prog_size, extable_size, extable_align, extable_offset;
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
-	struct bpf_binary_header *ro_header;
+	struct bpf_binary_header *ro_header = NULL;
 	struct arm64_jit_data *jit_data;
+	void __percpu *priv_stack_ptr = NULL;
 	bool was_classic = bpf_prog_was_classic(prog);
+	int priv_stack_alloc_sz;
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	struct jit_ctx ctx;
@@ -1908,6 +1976,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		}
 		prog->aux->jit_data = jit_data;
 	}
+	priv_stack_ptr = prog->aux->priv_stack_ptr;
+	if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
+		/* Allocate actual private stack size with verifier-calculated
+		 * stack size plus two memory guards to protect overflow and
+		 * underflow.
+		 */
+		priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+				      2 * PRIV_STACK_GUARD_SZ;
+		priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
+		if (!priv_stack_ptr) {
+			prog = orig_prog;
+			goto out_priv_stack;
+		}
+
+		priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
+		prog->aux->priv_stack_ptr = priv_stack_ptr;
+	}
 	if (jit_data->ctx.offset) {
 		ctx = jit_data->ctx;
 		ro_image_ptr = jit_data->ro_image;
@@ -1931,6 +2016,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 	ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
 
+	if (priv_stack_ptr)
+		ctx.priv_sp_used = true;
+
 	/* Pass 1: Estimate the maximum image size.
 	 *
 	 * BPF line info needs ctx->offset[i] to be the offset of
@@ -2070,7 +2158,12 @@ skip_init_ctx:
 			ctx.offset[i] *= AARCH64_INSN_SIZE;
 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
+		if (!ro_header && priv_stack_ptr) {
+			free_percpu(priv_stack_ptr);
+			prog->aux->priv_stack_ptr = NULL;
+		}
 		kvfree(ctx.offset);
+out_priv_stack:
 		kfree(jit_data);
 		prog->aux->jit_data = NULL;
 	}
@@ -2089,6 +2182,11 @@ out_free_hdr:
 	goto out_off;
 }
 
+bool bpf_jit_supports_private_stack(void)
+{
+	return true;
+}
+
 bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
@@ -2243,11 +2341,6 @@ static int calc_arg_aux(const struct btf_func_model *m,
 
 	/* the rest arguments are passed through stack */
 	for (; i < m->nr_args; i++) {
-		/* We can not know for sure about exact alignment needs for
-		 * struct passed on stack, so deny those
-		 */
-		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
-			return -ENOTSUPP;
 		stack_slots = (m->arg_size[i] + 7) / 8;
 		a->bstack_for_args += stack_slots * 8;
 		a->ostack_for_args = a->ostack_for_args + stack_slots * 8;
@@ -2911,6 +3004,17 @@ bool bpf_jit_supports_percpu_insn(void)
 	return true;
 }
 
+bool bpf_jit_bypass_spec_v4(void)
+{
+	/* In case of arm64, we rely on the firmware mitigation of Speculative
+	 * Store Bypass as controlled via the ssbd kernel parameter. Whenever
+	 * the mitigation is enabled, it works for all of the kernel code with
+	 * no need to provide any additional instructions. Therefore, skip
+	 * inserting nospec insns against Spectre v4.
+	 */
+	return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
 	switch (imm) {
@@ -2928,6 +3032,8 @@ void bpf_jit_free(struct bpf_prog *prog)
 	if (prog->jited) {
 		struct arm64_jit_data *jit_data = prog->aux->jit_data;
 		struct bpf_binary_header *hdr;
+		void __percpu *priv_stack_ptr;
+		int priv_stack_alloc_sz;
 
 		/*
 		 * If we fail the final pass of JIT (from jit_subprogs),
@@ -2941,6 +3047,13 @@ void bpf_jit_free(struct bpf_prog *prog)
 		}
 		hdr = bpf_jit_binary_pack_hdr(prog);
 		bpf_jit_binary_pack_free(hdr, NULL);
+		priv_stack_ptr = prog->aux->priv_stack_ptr;
+		if (priv_stack_ptr) {
+			priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+					      2 * PRIV_STACK_GUARD_SZ;
+			priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
+			free_percpu(prog->aux->priv_stack_ptr);
+		}
 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
 	}
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 5daa77aee7f7..a25a6ffe7d7c 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -370,6 +370,23 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	return 0;
 }
 
+bool bpf_jit_bypass_spec_v1(void)
+{
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
+	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
+#else
+	return true;
+#endif
+}
+
+bool bpf_jit_bypass_spec_v4(void)
+{
+	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
+		 stf_barrier_type_get() != STF_BARRIER_NONE);
+}
+
 /*
  * We spill into the redzone always, even if the bpf program has its own stackframe.
  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
@@ -397,6 +414,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 		       u32 *addrs, int pass, bool extra_pass)
 {
 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
+	bool sync_emitted, ori31_emitted;
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
 	int i, ret;
@@ -789,30 +807,51 @@ emit_clear:
 
 		/*
 		 * BPF_ST NOSPEC (speculation barrier)
+		 *
+		 * The following must act as a barrier against both Spectre v1
+		 * and v4 if we requested both mitigations. Therefore, also emit
+		 * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
+		 * the insns needed for a Spectre v4 barrier.
+		 *
+		 * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
+		 * we can skip the respective other barrier type as an
+		 * optimization.
 		 */
 		case BPF_ST | BPF_NOSPEC:
-			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
-			    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
-				break;
-
-			switch (stf_barrier) {
-			case STF_BARRIER_EIEIO:
-				EMIT(PPC_RAW_EIEIO() | 0x02000000);
-				break;
-			case STF_BARRIER_SYNC_ORI:
+			sync_emitted = false;
+			ori31_emitted = false;
+			if (IS_ENABLED(CONFIG_PPC_E500) &&
+			    !bpf_jit_bypass_spec_v1()) {
+				EMIT(PPC_RAW_ISYNC());
 				EMIT(PPC_RAW_SYNC());
-				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
-				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
-				break;
-			case STF_BARRIER_FALLBACK:
-				ctx->seen |= SEEN_FUNC;
-				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
-				EMIT(PPC_RAW_MTCTR(_R12));
-				EMIT(PPC_RAW_BCTRL());
-				break;
-			case STF_BARRIER_NONE:
-				break;
+				sync_emitted = true;
 			}
+			if (!bpf_jit_bypass_spec_v4()) {
+				switch (stf_barrier) {
+				case STF_BARRIER_EIEIO:
+					EMIT(PPC_RAW_EIEIO() | 0x02000000);
+					break;
+				case STF_BARRIER_SYNC_ORI:
+					if (!sync_emitted)
+						EMIT(PPC_RAW_SYNC());
+					EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
+					EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+					ori31_emitted = true;
+					break;
+				case STF_BARRIER_FALLBACK:
+					ctx->seen |= SEEN_FUNC;
+					PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+					EMIT(PPC_RAW_MTCTR(_R12));
+					EMIT(PPC_RAW_BCTRL());
+					break;
+				case STF_BARRIER_NONE:
+					break;
+				}
+			}
+			if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+			    !bpf_jit_bypass_spec_v1() &&
+			    !ori31_emitted)
+				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
 			break;
 
 		/*
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
deleted file mode 100644
index 7822ea92e54a..000000000000
--- a/arch/s390/net/bpf_jit.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * BPF Jit compiler defines
- *
- * Copyright IBM Corp. 2012,2015
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
- */
-
-#ifndef __ARCH_S390_NET_BPF_JIT_H
-#define __ARCH_S390_NET_BPF_JIT_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/filter.h>
-#include <linux/types.h>
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Stackframe layout (packed stack):
- *
- *				    ^ high
- *	      +---------------+     |
- *	      | old backchain |     |
- *	      +---------------+     |
- *	      |   r15 - r6    |     |
- *	      +---------------+     |
- *	      | 4 byte align  |     |
- *	      | tail_call_cnt |     |
- * BFP	   -> +===============+     |
- *	      |		      |     |
- *	      |   BPF stack   |     |
- *	      |		      |     |
- * R15+160 -> +---------------+     |
- *	      | new backchain |     |
- * R15+152 -> +---------------+     |
- *	      | + 152 byte SA |     |
- * R15	   -> +---------------+     + low
- *
- * We get 160 bytes stack space from calling function, but only use
- * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
- *
- * The stack size used by the BPF program ("BPF stack" above) is
- * passed via "aux->stack_depth".
- */
-#define STK_SPACE_ADD	(160)
-#define STK_160_UNUSED	(160 - 12 * 8)
-#define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)
-
-#define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
-#define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
-
-#endif /* __ARCH_S390_NET_BPF_JIT_H */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0c9a35782c83..bb17efe29d65 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -32,7 +32,6 @@
 #include <asm/set_memory.h>
 #include <asm/text-patching.h>
 #include <asm/unwind.h>
-#include "bpf_jit.h"
 
 struct bpf_jit {
 	u32 seen;		/* Flags to remember seen eBPF instructions */
@@ -54,6 +53,7 @@ struct bpf_jit {
 	int prologue_plt;	/* Start of prologue hotpatch PLT */
 	int kern_arena;		/* Pool offset of kernel arena address */
 	u64 user_arena;		/* User arena address */
+	u32 frame_off;		/* Offset of struct prog_frame from %r15 */
 };
 
 #define SEEN_MEM	BIT(0)	/* use mem[] for temporary storage */
@@ -426,11 +426,25 @@ static void jit_fill_hole(void *area, unsigned int size)
 }
 
 /*
+ * Caller-allocated part of the frame.
+ * Thanks to packed stack, its otherwise unused initial part can be used for
+ * the BPF stack and for the next frame.
+ */
+struct prog_frame {
+	u64 unused[8];
+	/* BPF stack starts here and grows towards 0 */
+	u32 tail_call_cnt;
+	u32 pad;
+	u64 r6[10];	/* r6 - r15 */
+	u64 backchain;
+} __packed;
+
+/*
  * Save registers from "rs" (register start) to "re" (register end) on stack
  */
 static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 {
-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
+	u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8;
 
 	if (rs == re)
 		/* stg %rs,off(%r15) */
@@ -443,12 +457,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 /*
  * Restore registers from "rs" (register start) to "re" (register end) on stack
  */
-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
+static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
 {
-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
-
-	if (jit->seen & SEEN_STACK)
-		off += STK_OFF + stack_depth;
+	u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8;
 
 	if (rs == re)
 		/* lg %rs,off(%r15) */
@@ -492,8 +503,7 @@ static int get_end(u16 seen_regs, int start)
  * Save and restore clobbered registers (6-15) on stack.
  * We save/restore registers in chunks with gap >= 2 registers.
  */
-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
-			      u16 extra_regs)
+static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs)
 {
 	u16 seen_regs = jit->seen_regs | extra_regs;
 	const int last = 15, save_restore_size = 6;
@@ -516,7 +526,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
 		if (op == REGS_SAVE)
 			save_regs(jit, rs, re);
 		else
-			restore_regs(jit, rs, re, stack_depth);
+			restore_regs(jit, rs, re);
 		re++;
 	} while (re <= last);
 }
@@ -581,11 +591,12 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
- * See stack frame layout description in "bpf_jit.h"!
+ * Stack frame layout is described by struct prog_frame.
  */
-static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
-			     u32 stack_depth)
+static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp)
 {
+	BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD);
+
 	/* No-op for hotpatching */
 	/* brcl 0,prologue_plt */
 	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
@@ -593,8 +604,9 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
 
 	if (!bpf_is_subprog(fp)) {
 		/* Initialize the tail call counter in the main program. */
-		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
-		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
+		/* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */
+		_EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt),
+		       0xf000 | offsetof(struct prog_frame, tail_call_cnt));
 	} else {
 		/*
 		 * Skip the tail call counter initialization in subprograms.
@@ -617,7 +629,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
 		jit->seen_regs |= NVREGS;
 	} else {
 		/* Save registers */
-		save_restore_regs(jit, REGS_SAVE, stack_depth,
+		save_restore_regs(jit, REGS_SAVE,
 				  fp->aux->exception_boundary ? NVREGS : 0);
 	}
 	/* Setup literal pool */
@@ -637,13 +649,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
 	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
 		/* lgr %w1,%r15 (backchain) */
 		EMIT4(0xb9040000, REG_W1, REG_15);
-		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
-		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
-		/* aghi %r15,-STK_OFF */
-		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
-		/* stg %w1,152(%r15) (backchain) */
+		/* la %bfp,unused_end(%r15) (BPF frame pointer) */
+		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15,
+			   offsetofend(struct prog_frame, unused));
+		/* aghi %r15,-frame_off */
+		EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off);
+		/* stg %w1,backchain(%r15) */
 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
-			      REG_15, 152);
+			      REG_15,
+			      offsetof(struct prog_frame, backchain));
 	}
 }
 
@@ -677,13 +691,13 @@ static void call_r1(struct bpf_jit *jit)
 /*
  * Function epilogue
  */
-static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+static void bpf_jit_epilogue(struct bpf_jit *jit)
 {
 	jit->exit_ip = jit->prg;
 	/* Load exit code: lgr %r2,%b0 */
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
-	save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
+	save_restore_regs(jit, REGS_RESTORE, 0);
 	EMIT_JUMP_REG(14);
 
 	jit->prg = ALIGN(jit->prg, 8);
@@ -865,7 +879,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
  * stack space for the large switch statement.
  */
 static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
-				 int i, bool extra_pass, u32 stack_depth)
+				 int i, bool extra_pass)
 {
 	struct bpf_insn *insn = &fp->insnsi[i];
 	s32 branch_oc_off = insn->off;
@@ -1786,9 +1800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		 * Note 2: We assume that the verifier does not let us call the
 		 * main program, which clears the tail call counter on entry.
		 */
-		/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
-		_EMIT6(0xd203f000 | STK_OFF_TCCNT,
-		       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
+		/* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
+		_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
+		       0xf000 | (jit->frame_off +
+				 offsetof(struct prog_frame, tail_call_cnt)));
 
 		/* Sign-extend the kfunc arguments. */
 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
@@ -1839,10 +1854,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		 *         goto out;
 		 */
 
-		if (jit->seen & SEEN_STACK)
-			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
-		else
-			off = STK_OFF_TCCNT;
+		off = jit->frame_off +
+		      offsetof(struct prog_frame, tail_call_cnt);
 		/* lhi %w0,1 */
 		EMIT4_IMM(0xa7080000, REG_W0, 1);
 		/* laal %w1,%w0,off(%r15) */
@@ -1872,7 +1885,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		/*
 		 * Restore registers before calling function
 		 */
-		save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
+		save_restore_regs(jit, REGS_RESTORE, 0);
 
 		/*
 		 * goto *(prog->bpf_func + tail_call_start);
@@ -2165,7 +2178,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
  * Compile eBPF program into s390x code
 */
 static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
-			bool extra_pass, u32 stack_depth)
+			bool extra_pass)
 {
 	int i, insn_count, lit32_size, lit64_size;
 	u64 kern_arena;
@@ -2174,24 +2187,30 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
 	jit->lit64 = jit->lit64_start;
 	jit->prg = 0;
 	jit->excnt = 0;
+	if (is_first_pass(jit) || (jit->seen & SEEN_STACK))
+		jit->frame_off = sizeof(struct prog_frame) -
+				 offsetofend(struct prog_frame, unused) +
+				 round_up(fp->aux->stack_depth, 8);
+	else
+		jit->frame_off = 0;
 
 	kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	if (kern_arena)
 		jit->kern_arena = _EMIT_CONST_U64(kern_arena);
 	jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
 
-	bpf_jit_prologue(jit, fp, stack_depth);
+	bpf_jit_prologue(jit, fp);
 	if (bpf_set_addr(jit, 0) < 0)
 		return -1;
 	for (i = 0; i < fp->len; i += insn_count) {
-		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
+		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
 		if (insn_count < 0)
 			return -1;
 		/* Next instruction address */
 		if (bpf_set_addr(jit, i + insn_count) < 0)
 			return -1;
 	}
-	bpf_jit_epilogue(jit, stack_depth);
+	bpf_jit_epilogue(jit);
 
 	lit32_size = jit->lit32 - jit->lit32_start;
 	lit64_size = jit->lit64 - jit->lit64_start;
@@ -2267,7 +2286,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
 */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
-	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
 	struct bpf_prog *tmp, *orig_fp = fp;
 	struct bpf_binary_header *header;
 	struct s390_jit_data *jit_data;
@@ -2320,7 +2338,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	 *   - 3:   Calculate program size and addrs array
 	 */
 	for (pass = 1; pass <= 3; pass++) {
-		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
+		if (bpf_jit_prog(&jit, fp, extra_pass)) {
 			fp = orig_fp;
 			goto free_addrs;
 		}
@@ -2334,7 +2352,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto free_addrs;
 	}
skip_init_ctx:
-	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
+	if (bpf_jit_prog(&jit, fp, extra_pass)) {
 		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
@@ -2654,9 +2672,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	/* stg %r1,backchain_off(%r15) */
 	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
 		      tjit->backchain_off);
-	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
+	/* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */
 	_EMIT6(0xd203f000 | tjit->tccnt_off,
-	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
+	       0xf000 | (tjit->stack_size +
+			 offsetof(struct prog_frame, tail_call_cnt)));
 	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
 	if (nr_reg_args)
 		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
@@ -2793,8 +2812,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	       (nr_stack_args * sizeof(u64) - 1) << 16 |
 	       tjit->stack_args_off,
 	       0xf000 | tjit->orig_stack_args_off);
-	/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
-	_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
+	/* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
+	_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
+	       0xf000 | tjit->tccnt_off);
 	/* lgr %r1,%r8 */
 	EMIT4(0xb9040000, REG_1, REG_8);
 	/* %r1() */
@@ -2851,8 +2871,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
 			      tjit->retval_off);
-	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
-	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
+	/* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */
+	_EMIT6(0xd203f000 | (tjit->stack_size +
+			     offsetof(struct prog_frame, tail_call_cnt)),
 	       0xf000 | tjit->tccnt_off);
 	/* aghi %r15,stack_size */
 	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 15672cb926fc..7e3fca164620 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3501,13 +3501,6 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
 }
 
-static const char *bpf_get_prog_name(struct bpf_prog *prog)
-{
-	if (prog->aux->ksym.prog)
-		return prog->aux->ksym.name;
-	return prog->aux->name;
-}
-
 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
 {
 	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
@@ -3531,7 +3524,7 @@ static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size
 		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
 		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
 			pr_err("BPF private stack overflow/underflow detected for prog %sx\n",
-			       bpf_get_prog_name(prog));
+			       bpf_jit_get_prog_name(prog));
 			break;
 		}
 	}
@@ -3845,7 +3838,6 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
 	}
 	return;
 #endif
-	WARN(1, "verification of programs using bpf_throw should have failed\n");
 }
 
 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
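The arm64 private-stack support above reuses the guard scheme the x86 JIT already had: each per-CPU stack is allocated with 16 bytes of sentinel words below and above the usable area, written by priv_stack_init_guard() and checked again by priv_stack_check_guard() when the program is freed. A minimal user-space C sketch of the same pattern, with a plain malloc() standing in for __alloc_percpu_gfp() and illustrative helper names:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GUARD_SZ  16                    /* bytes of sentinel on each end */
#define GUARD_VAL 0xEB9F12345678eb9fULL /* same sentinel the JITs use */

/* Allocate stack_depth usable bytes bracketed by two guard regions. */
static uint64_t *stack_alloc(size_t stack_depth)
{
	size_t total = stack_depth + 2 * GUARD_SZ;
	size_t under = (total - GUARD_SZ) / 8; /* index of the top guard */
	uint64_t *p = malloc(total);

	if (!p)
		return NULL;
	p[0] = GUARD_VAL;		/* overflow guard (low end) */
	p[1] = GUARD_VAL;
	p[under] = GUARD_VAL;		/* underflow guard (high end) */
	p[under + 1] = GUARD_VAL;
	return p;
}

/* Return nonzero if either guard was clobbered while the stack was live. */
static int stack_check(const uint64_t *p, size_t stack_depth)
{
	size_t total = stack_depth + 2 * GUARD_SZ;
	size_t under = (total - GUARD_SZ) / 8;

	return p[0] != GUARD_VAL || p[1] != GUARD_VAL ||
	       p[under] != GUARD_VAL || p[under + 1] != GUARD_VAL;
}

int main(void)
{
	uint64_t *stk = stack_alloc(512);

	if (!stk)
		return 1;
	memset(stk + 2, 0, 512);	/* touch only the region between guards */
	if (stack_check(stk, 512))
		fprintf(stderr, "stack overflow/underflow detected\n");
	free(stk);
	return 0;
}
```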
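The new BPF_ST | BPF_NOSPEC lowering for arm64 likewise picks the cheapest barrier the CPU offers: a single SB instruction where FEAT_SB is present, otherwise DSB NSH followed by ISB. A standalone sketch of that selection, where has_sb stands in for alternative_has_cap_likely(ARM64_HAS_SB) and the constants are the architectural encodings the A64_SB/A64_DSB_NSH/A64_ISB macros resolve to:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INSN_SB      0xd50330ffu /* SB */
#define INSN_DSB_NSH 0xd503379fu /* DSB NSH (CRm = 0x7) */
#define INSN_ISB     0xd5033fdfu /* ISB */

static int has_sb; /* stand-in for the ARM64_HAS_SB capability check */

/* Emit a speculation barrier the way build_insn() now lowers
 * BPF_ST | BPF_NOSPEC: one SB where supported, else DSB NSH + ISB.
 */
static void emit_nospec(uint32_t **image)
{
	if (has_sb) {
		*(*image)++ = INSN_SB;
	} else {
		*(*image)++ = INSN_DSB_NSH;
		*(*image)++ = INSN_ISB;
	}
}

int main(void)
{
	uint32_t buf[2], *p = buf;

	emit_nospec(&p);
	for (uint32_t *q = buf; q < p; q++)
		printf("0x%08" PRIx32 "\n", *q);
	return 0;
}
```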

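On s390, the deleted STK_OFF_* constants are replaced by struct prog_frame, which describes the caller-allocated frame once and derives every displacement with offsetof()/offsetofend(). The sketch below copies that struct from the diff and checks, outside the kernel, that the derived offsets reproduce the old constants; offsetofend() is redefined locally because it is a kernel macro, and the literal 160 plays the role of STACK_FRAME_OVERHEAD:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Kernel macro, redefined here: offset of the first byte past MEMBER. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Copied from the diff: the caller-allocated 160-byte s390 stack frame. */
struct prog_frame {
	uint64_t unused[8];
	/* BPF stack starts here and grows towards 0 */
	uint32_t tail_call_cnt;
	uint32_t pad;
	uint64_t r6[10];	/* save slots for r6 - r15 */
	uint64_t backchain;
} __attribute__((packed));

/* Like the BUILD_BUG_ON() added to bpf_jit_prologue(): the struct must
 * exactly cover the 160-byte frame the caller allocates.
 */
_Static_assert(sizeof(struct prog_frame) == 160, "frame layout drifted");

int main(void)
{
	printf("tail_call_cnt: %zu (STK_OFF_TCCNT was 160 - 12 * 8 = 64)\n",
	       offsetof(struct prog_frame, tail_call_cnt));
	printf("r6 save area:  %zu (STK_OFF_R6 was 160 - 11 * 8 = 72)\n",
	       offsetof(struct prog_frame, r6));
	printf("backchain:     %zu (was the hardcoded 152)\n",
	       offsetof(struct prog_frame, backchain));
	printf("BPF FP base:   %zu (STK_160_UNUSED was 160 - 12 * 8 = 64)\n",
	       offsetofend(struct prog_frame, unused));
	return 0;
}
```

Describing the layout as a struct makes the compiler, rather than hand-maintained arithmetic, responsible for keeping the displacements consistent.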