Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c   | 25
-rw-r--r--  arch/loongarch/net/bpf_jit.c    |  9
-rw-r--r--  arch/m68k/coldfire/m5272.c      | 15
-rw-r--r--  arch/mips/bcm47xx/setup.c       |  7
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 10
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 11
-rw-r--r--  arch/s390/net/bpf_jit_comp.c    |  7
-rw-r--r--  arch/x86/Kconfig                |  1
-rw-r--r--  arch/x86/kernel/ftrace.c        |  7
-rw-r--r--  arch/x86/kernel/ftrace_64.S     | 12
-rw-r--r--  arch/x86/net/bpf_jit_comp.c     | 97
11 files changed, 126 insertions(+), 75 deletions(-)
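The common thread of the series: bpf_arch_text_poke() now takes the old and the new poke type as separate arguments, so one poke can move between instruction kinds instead of assuming a single type for both sides. A rough caller-side sketch, not part of the patch (BPF_MOD_NOP is the enum bpf_text_poke_type member this series relies on; the helper names here are hypothetical):

/* Hypothetical illustrations of the split old/new poke types. */
static int attach_tramp(void *ip, void *tramp)
{
	/* nop -> call: install a trampoline call over the padding nop */
	return bpf_arch_text_poke(ip, BPF_MOD_NOP, BPF_MOD_CALL, NULL, tramp);
}

static int detach_tramp(void *ip, void *tramp)
{
	/* call -> nop: remove it again */
	return bpf_arch_text_poke(ip, BPF_MOD_CALL, BPF_MOD_NOP, tramp, NULL);
}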
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index afd05b41ea9e..74dd29816f36 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1452,6 +1452,10 @@ emit_bswap_uxt:
 		emit(A64_ASR(is64, dst, dst, imm), ctx);
 		break;
 
+	/* JUMP reg */
+	case BPF_JMP | BPF_JA | BPF_X:
+		emit(A64_BR(dst), ctx);
+		break;
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 	case BPF_JMP32 | BPF_JA:
@@ -2231,6 +2235,13 @@ skip_init_ctx:
 		for (i = 0; i <= prog->len; i++)
 			ctx.offset[i] *= AARCH64_INSN_SIZE;
 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
+		/*
+		 * The bpf_prog_update_insn_ptrs function expects offsets to
+		 * point to the first byte of the jitted instruction (unlike
+		 * the bpf_prog_fill_jited_linfo above, which, for historical
+		 * reasons, expects to point to the next instruction)
+		 */
+		bpf_prog_update_insn_ptrs(prog, ctx.offset, ctx.ro_image);
 out_off:
 	if (!ro_header && priv_stack_ptr) {
 		free_percpu(priv_stack_ptr);
@@ -2923,8 +2934,9 @@ static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
  * The dummy_tramp is used to prevent another CPU from jumping to unknown
  * locations during the patching process, making the patching process easier.
  */
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	int ret;
 	u32 old_insn;
@@ -2968,14 +2980,13 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 		      !poking_bpf_entry))
 		return -EINVAL;
 
-	if (poke_type == BPF_MOD_CALL)
-		branch_type = AARCH64_INSN_BRANCH_LINK;
-	else
-		branch_type = AARCH64_INSN_BRANCH_NOLINK;
-
+	branch_type = old_t == BPF_MOD_CALL ? AARCH64_INSN_BRANCH_LINK :
+					      AARCH64_INSN_BRANCH_NOLINK;
 	if (gen_branch_or_nop(branch_type, ip, old_addr, plt, &old_insn) < 0)
 		return -EFAULT;
 
+	branch_type = new_t == BPF_MOD_CALL ? AARCH64_INSN_BRANCH_LINK :
+					      AARCH64_INSN_BRANCH_NOLINK;
 	if (gen_branch_or_nop(branch_type, ip, new_addr, plt, &new_insn) < 0)
 		return -EFAULT;
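The new arm64 case lowers an indirect jump through a BPF register (BPF_JMP | BPF_JA | BPF_X) to a single A64 BR on dst; the x86 JIT further down grows the matching case. For illustration only, such an instruction would be assembled roughly as follows (hypothetical fragment, not from the patch):

/* "goto *R2": class JMP, opcode JA, source X, target address in dst_reg */
const struct bpf_insn indirect_goto = {
	.code    = BPF_JMP | BPF_JA | BPF_X,
	.dst_reg = BPF_REG_2,
};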
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index f97dc9936401..8dc58781b8eb 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1284,11 +1284,12 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 	return ret ? ERR_PTR(-EINVAL) : dst;
 }
 
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	int ret;
-	bool is_call = (poke_type == BPF_MOD_CALL);
+	bool is_call;
 	u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
 	u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
@@ -1298,6 +1299,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	if (!is_bpf_text_address((unsigned long)ip))
 		return -ENOTSUPP;
 
+	is_call = old_t == BPF_MOD_CALL;
 	ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
 	if (ret)
 		return ret;
@@ -1305,6 +1307,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES))
 		return -EFAULT;
 
+	is_call = new_t == BPF_MOD_CALL;
 	ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call);
 	if (ret)
 		return ret;
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index 918e2a3236c5..28b3ffa25ba0 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -16,7 +16,6 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
@@ -103,23 +102,9 @@ void __init config_BSP(char *commandp, int size)
 
 /***************************************************************************/
 
-/*
- * Some 5272 based boards have the FEC ethernet directly connected to
- * an ethernet switch. In this case we need to use the fixed phy type,
- * and we need to declare it early in boot.
- */
-static const struct fixed_phy_status nettel_fixed_phy_status __initconst = {
-	.link	= 1,
-	.speed	= 100,
-	.duplex	= 0,
-};
-
-/***************************************************************************/
-
 static int __init init_BSP(void)
 {
 	m5272_uarts_init();
-	fixed_phy_add(&nettel_fixed_phy_status);
 	clkdev_add_table(m5272_clk_lookup, ARRAY_SIZE(m5272_clk_lookup));
 	return 0;
 }
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index a93a4266dc1e..38ed61b4bd96 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -256,12 +256,6 @@ static int __init bcm47xx_cpu_fixes(void)
 }
 arch_initcall(bcm47xx_cpu_fixes);
 
-static const struct fixed_phy_status bcm47xx_fixed_phy_status __initconst = {
-	.link	= 1,
-	.speed	= SPEED_100,
-	.duplex	= DUPLEX_FULL,
-};
-
 static int __init bcm47xx_register_bus_complete(void)
 {
 	switch (bcm47xx_bus_type) {
@@ -282,7 +276,6 @@ static int __init bcm47xx_register_bus_complete(void)
 	bcm47xx_leds_register();
 	bcm47xx_workarounds();
 
-	fixed_phy_add(&bcm47xx_fixed_phy_status);
 	return 0;
 }
 device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 88ad5ba7b87f..5e976730b2f5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1107,8 +1107,9 @@ static void do_isync(void *info __maybe_unused)
  * execute isync (or some CSI) so that they don't go back into the
  * trampoline again.
  */
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	unsigned long bpf_func, bpf_func_end, size, offset;
 	ppc_inst_t old_inst, new_inst;
@@ -1119,7 +1120,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 		return -EOPNOTSUPP;
 
 	bpf_func = (unsigned long)ip;
-	branch_flags = poke_type == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
 
 	/* We currently only support poking bpf programs */
 	if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) {
@@ -1132,7 +1132,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	 * an unconditional branch instruction at im->ip_after_call
 	 */
 	if (offset) {
-		if (poke_type != BPF_MOD_JUMP) {
+		if (old_t == BPF_MOD_CALL || new_t == BPF_MOD_CALL) {
 			pr_err("%s (0x%lx): calls are not supported in bpf prog body\n",
 			       __func__, bpf_func);
 			return -EOPNOTSUPP;
@@ -1166,6 +1166,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	}
 
 	old_inst = ppc_inst(PPC_RAW_NOP());
+	branch_flags = old_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
 	if (old_addr) {
 		if (is_offset_in_branch_range(ip - old_addr))
 			create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags);
@@ -1174,6 +1175,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 			      branch_flags);
 	}
 	new_inst = ppc_inst(PPC_RAW_NOP());
+	branch_flags = new_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;
 	if (new_addr) {
 		if (is_offset_in_branch_range(ip - new_addr))
 			create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 45cbc7c6fe49..5f9457e910e8 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -852,17 +852,19 @@ static int gen_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
 	return emit_jump_and_link(is_call ? RV_REG_T0 : RV_REG_ZERO, rvoff, false, &ctx);
 }
 
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	u32 old_insns[RV_FENTRY_NINSNS], new_insns[RV_FENTRY_NINSNS];
-	bool is_call = poke_type == BPF_MOD_CALL;
+	bool is_call;
 	int ret;
 
 	if (!is_kernel_text((unsigned long)ip) &&
 	    !is_bpf_text_address((unsigned long)ip))
 		return -ENOTSUPP;
 
+	is_call = old_t == BPF_MOD_CALL;
 	ret = gen_jump_or_nops(old_addr, ip, old_insns, is_call);
 	if (ret)
 		return ret;
@@ -870,6 +872,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
 		return -EFAULT;
 
+	is_call = new_t == BPF_MOD_CALL;
 	ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
 	if (ret)
 		return ret;
@@ -1131,7 +1134,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	store_args(nr_arg_slots, args_off, ctx);
 
 	/* skip to actual body of traced function */
-	if (flags & BPF_TRAMP_F_SKIP_FRAME)
+	if (flags & BPF_TRAMP_F_ORIG_STACK)
 		orig_call += RV_FENTRY_NINSNS * 4;
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 3238c178bed8..579461d471bb 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2412,8 +2412,9 @@ bool bpf_jit_supports_far_kfunc_call(void)
 	return true;
 }
 
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	struct bpf_plt expected_plt, current_plt, new_plt, *plt;
 	struct {
@@ -2430,7 +2431,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
 			return -EINVAL;
 
-		if (t == BPF_MOD_JUMP &&
+		if ((new_t == BPF_MOD_JUMP || old_t == BPF_MOD_JUMP) &&
 		    insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
 			/*
 			 * The branch already points to the destination,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 34fb46d5341b..17a107cc5244 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -230,6 +230,7 @@ config X86
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
 	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
 	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
 	select HAVE_EBPF_JIT
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4450acec9390..0543b57f54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	 * No need to translate into a callthunk. The trampoline does
 	 * the depth accounting itself.
 	 */
-	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	if (ftrace_is_jmp(addr)) {
+		addr = ftrace_jmp_get(addr);
+		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+	} else {
+		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	}
 }
 
 static int ftrace_verify_code(unsigned long ip, const char *old_code)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 823dbdd0eb41..a132608265f6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
 	RET
 
+1:
+	testb	$1, %al
+	jz	2f
+	andq	$0xfffffffffffffffe, %rax
+	movq	%rax, MCOUNT_REG_SIZE+8(%rsp)
+	restore_mcount_regs
+	/* Restore flags */
+	popfq
+	RET
+
 	/* Swap the flags with orig_rax */
-1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
+2:	movq MCOUNT_REG_SIZE(%rsp), %rdi
 	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
 	movq %rax, MCOUNT_REG_SIZE(%rsp)
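The ftrace side encodes "patch a jmp rather than a call" in bit 0 of the trampoline address: ftrace_call_replace() tests ftrace_is_jmp(), and the ftrace_regs_caller exit path checks and clears the same bit (testb $1, %al; andq $0xfffffffffffffffe, %rax) before returning through the modified address. The helpers themselves are defined elsewhere in the series; a plausible minimal form, shown purely as a sketch (FTRACE_JMP_FLAG is a hypothetical name; ftrace_is_jmp() and ftrace_jmp_get() are the calls the patch makes):

/* Sketch of the bit-0 tagging the hunks above assume. */
#define FTRACE_JMP_FLAG	1UL

static inline bool ftrace_is_jmp(unsigned long addr)
{
	return addr & FTRACE_JMP_FLAG;		/* tagged: emit a jmp */
}

static inline unsigned long ftrace_jmp_get(unsigned long addr)
{
	return addr & ~FTRACE_JMP_FLAG;		/* recover the real target */
}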
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index de5083cb1d37..b69dc7194e2c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -597,7 +597,8 @@ static int emit_jump(u8 **pprog, void *func, void *ip)
 	return emit_patch(pprog, func, ip, 0xE9);
 }
 
-static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+				enum bpf_text_poke_type new_t,
 				void *old_addr, void *new_addr)
 {
 	const u8 *nop_insn = x86_nops[5];
@@ -607,9 +608,9 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	int ret;
 
 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
-	if (old_addr) {
+	if (old_t != BPF_MOD_NOP && old_addr) {
 		prog = old_insn;
-		ret = t == BPF_MOD_CALL ?
+		ret = old_t == BPF_MOD_CALL ?
 		      emit_call(&prog, old_addr, ip) :
 		      emit_jump(&prog, old_addr, ip);
 		if (ret)
@@ -617,9 +618,9 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	}
 
 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
-	if (new_addr) {
+	if (new_t != BPF_MOD_NOP && new_addr) {
 		prog = new_insn;
-		ret = t == BPF_MOD_CALL ?
+		ret = new_t == BPF_MOD_CALL ?
 		      emit_call(&prog, new_addr, ip) :
 		      emit_jump(&prog, new_addr, ip);
 		if (ret)
@@ -640,8 +641,9 @@ out:
 	return ret;
 }
 
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
-		       void *old_addr, void *new_addr)
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+		       enum bpf_text_poke_type new_t, void *old_addr,
+		       void *new_addr)
 {
 	if (!is_kernel_text((long)ip) &&
 	    !is_bpf_text_address((long)ip))
@@ -655,29 +657,43 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	if (is_endbr(ip))
 		ip += ENDBR_INSN_SIZE;
 
-	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
+	return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
 }
 
 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
 
-static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
+static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
 {
 	u8 *prog = *pprog;
 
+	if (ereg)
+		EMIT1(0x41);
+
+	EMIT2(0xFF, 0xE0 + reg);
+
+	*pprog = prog;
+}
+
+static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
+{
+	u8 *prog = *pprog;
+	int reg = reg2hex[bpf_reg];
+	bool ereg = is_ereg(bpf_reg);
+
 	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
 		OPTIMIZER_HIDE_VAR(reg);
-		emit_jump(&prog, its_static_thunk(reg), ip);
+		emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 		EMIT_LFENCE();
-		EMIT2(0xFF, 0xE0 + reg);
+		__emit_indirect_jump(&prog, reg, ereg);
 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
 		OPTIMIZER_HIDE_VAR(reg);
 		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
-			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
+			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
 		else
-			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
+			emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
 	} else {
-		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
+		__emit_indirect_jump(&prog, reg, ereg);
 		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
 			EMIT1(0xCC);	/* int3 */
 	}
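emit_indirect_jump() now takes the BPF register number and derives the x86 encoding itself: reg2hex[] supplies the low three ModRM bits and is_ereg() decides whether a REX.B prefix is needed for r8-r15, with the mitigation thunk arrays indexed as reg + 8*ereg to match. A standalone sketch of the bytes __emit_indirect_jump() produces, for reference only:

/* "jmp *%reg" is FF /4: opcode 0xff, ModRM 0xe0 + (reg & 7);
 * r8-r15 additionally take a REX.B prefix, e.g.
 *   jmp *%rcx -> ff e1
 *   jmp *%r10 -> 41 ff e2
 */
static size_t encode_indirect_jmp(unsigned char *buf, int reg /* 0..15 */)
{
	size_t i = 0;

	if (reg >= 8)
		buf[i++] = 0x41;		/* REX.B extends ModRM.rm */
	buf[i++] = 0xff;
	buf[i++] = 0xe0 + (reg & 7);
	return i;
}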
@@ -797,7 +813,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
 	 * rdi == ctx (1st arg)
 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
 	 */
-	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
+	emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
 
 	/* out: */
 	ctx->tail_call_indirect_label = prog - start;
@@ -883,12 +899,13 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 		target = array->ptrs[poke->tail_call.key];
 		if (target) {
 			ret = __bpf_arch_text_poke(poke->tailcall_target,
-						   BPF_MOD_JUMP, NULL,
+						   BPF_MOD_NOP, BPF_MOD_JUMP,
+						   NULL,
 						   (u8 *)target->bpf_func +
 						   poke->adj_off);
 			BUG_ON(ret < 0);
 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
-						   BPF_MOD_JUMP,
+						   BPF_MOD_JUMP, BPF_MOD_NOP,
 						   (u8 *)poke->tailcall_target +
 						   X86_PATCH_SIZE, NULL);
 			BUG_ON(ret < 0);
@@ -2614,6 +2631,9 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
 			break;
 
+		case BPF_JMP | BPF_JA | BPF_X:
+			emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
+			break;
 		case BPF_JMP | BPF_JA:
 		case BPF_JMP32 | BPF_JA:
 			if (BPF_CLASS(insn->code) == BPF_JMP) {
@@ -2830,9 +2850,10 @@ static int get_nr_used_regs(const struct btf_func_model *m)
 }
 
 static void save_args(const struct btf_func_model *m, u8 **prog,
-		      int stack_size, bool for_call_origin)
+		      int stack_size, bool for_call_origin, u32 flags)
 {
 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
+	bool use_jmp = bpf_trampoline_use_jmp(flags);
 	int i, j;
 
 	/* Store function arguments to stack.
@@ -2873,7 +2894,7 @@ static void save_args(const struct btf_func_model *m, u8 **prog,
 			 */
 			for (j = 0; j < arg_regs; j++) {
 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
-					 nr_stack_slots * 8 + 0x18);
+					 nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
 					 -stack_size);
@@ -3267,12 +3288,17 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		 * should be 16-byte aligned. Following code depend on
 		 * that stack_size is already 8-byte aligned.
 		 */
-		stack_size += (stack_size % 16) ? 0 : 8;
+		if (bpf_trampoline_use_jmp(flags)) {
+			/* no rip in the "jmp" case */
+			stack_size += (stack_size % 16) ? 8 : 0;
+		} else {
+			stack_size += (stack_size % 16) ? 0 : 8;
+		}
 	}
 
 	arg_stack_off = stack_size;
 
-	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
+	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		/* skip patched call instruction and point orig_call to actual
 		 * body of the kernel function.
 		 */
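The flipped parity above follows from how the trampoline is entered: a call pushes an 8-byte return address, so the frame needs 8 bytes of padding only when stack_size % 16 == 0, while a jmp-entered trampoline (no return address, hence "no rip") needs it in the opposite case. The same missing slot is why save_args() now loads stack arguments from 16 + (!use_jmp) * 8 rather than the fixed 0x18. A worked sketch of the rule, assuming bpf_trampoline_use_jmp() as used above:

/* Pick padding so the frame ends up 16-byte aligned in both entry modes. */
static int pad_trampoline_stack(int stack_size, bool use_jmp)
{
	if (use_jmp)	/* entered by jmp: rsp still 16-byte aligned */
		return stack_size + ((stack_size % 16) ? 8 : 0);
	/* entered by call: rsp offset by the 8-byte return address */
	return stack_size + ((stack_size % 16) ? 0 : 8);
}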
@@ -3327,7 +3353,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
 	}
 
-	save_args(m, &prog, regs_off, false);
+	save_args(m, &prog, regs_off, false, flags);
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		/* arg1: mov rdi, im */
@@ -3360,7 +3386,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		restore_regs(m, &prog, regs_off);
-		save_args(m, &prog, arg_stack_off, true);
+		save_args(m, &prog, arg_stack_off, true, flags);
 
 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
 			/* Before calling the original function, load the
@@ -3543,7 +3569,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image,
 		if (err)
 			return err;
 
-	emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
+	emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
 
 	*pprog = prog;
 	return 0;
@@ -3827,6 +3853,15 @@ out_image:
 		jit_data->header = header;
 		jit_data->rw_header = rw_header;
 	}
+
+	/*
+	 * The bpf_prog_update_insn_ptrs function expects addrs to
+	 * point to the first byte of the jitted instruction (unlike
+	 * the bpf_prog_fill_jited_linfo below, which, for historical
+	 * reasons, expects to point to the next instruction)
+	 */
+	bpf_prog_update_insn_ptrs(prog, addrs, image);
+
 	/*
 	 * ctx.prog_offset is used when CFI preambles put code *before*
 	 * the function. See emit_cfi(). For FineIBT specifically this code
@@ -3953,6 +3988,7 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
 			       struct bpf_prog *new, struct bpf_prog *old)
 {
 	u8 *old_addr, *new_addr, *old_bypass_addr;
+	enum bpf_text_poke_type t;
 	int ret;
 
 	old_bypass_addr = old ? NULL : poke->bypass_addr;
@@ -3965,21 +4001,22 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
 	 * the kallsyms check.
 	 */
 	if (new) {
+		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
 		ret = __bpf_arch_text_poke(poke->tailcall_target,
-					   BPF_MOD_JUMP,
+					   t, BPF_MOD_JUMP,
 					   old_addr, new_addr);
 		BUG_ON(ret < 0);
 		if (!old) {
 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
-						   BPF_MOD_JUMP,
+						   BPF_MOD_JUMP, BPF_MOD_NOP,
 						   poke->bypass_addr,
 						   NULL);
 			BUG_ON(ret < 0);
 		}
 	} else {
+		t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
-					   BPF_MOD_JUMP,
-					   old_bypass_addr,
+					   t, BPF_MOD_JUMP, old_bypass_addr,
 					   poke->bypass_addr);
 		BUG_ON(ret < 0);
 		/* let other CPUs finish the execution of program
@@ -3988,9 +4025,9 @@
 		 */
 		if (!ret)
 			synchronize_rcu();
+		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
 		ret = __bpf_arch_text_poke(poke->tailcall_target,
-					   BPF_MOD_JUMP,
-					   old_addr, NULL);
+					   t, BPF_MOD_NOP, old_addr, NULL);
 		BUG_ON(ret < 0);
 	}
 }
