Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c')
 arch/arm64/net/bpf_jit_comp.c | 129 ++++++++++++++++++++++++++---------
 1 file changed, 94 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 52ffe115a8c4..ab83089c3d8f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -185,7 +185,7 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
{
- if (IS_ENABLED(CONFIG_CFI_CLANG))
+ if (IS_ENABLED(CONFIG_CFI))
emit_u32_data(hash, ctx);
}
@@ -1066,19 +1066,53 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
emit(A64_RET(A64_LR), ctx);
}
-#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
+/*
+ * Metadata encoding for exception handling in JITed code.
+ *
+ * Format of `fixup` field in `struct exception_table_entry`:
+ *
+ * Bit layout of `fixup` (32-bit):
+ *
+ * +-----------+--------+-----------+-----------+----------+
+ * | 31-27 | 26-22 | 21 | 20-16 | 15-0 |
+ * | | | | | |
+ * | FIXUP_REG | Unused | ARENA_ACC | ARENA_REG | OFFSET |
+ * +-----------+--------+-----------+-----------+----------+
+ *
+ * - OFFSET (16 bits): Offset used to compute the address for the load/store instruction.
+ * - ARENA_REG (5 bits): Register used to calculate the address for the load/store when
+ *   accessing the arena region.
+ * - ARENA_ACCESS (1 bit): Set when the faulting instruction accessed the arena region.
+ * - FIXUP_REG (5 bits): Destination register of the load instruction (cleared on fault), or
+ *   DONT_CLEAR for a store instruction.
+ */
+
+#define BPF_FIXUP_OFFSET_MASK GENMASK(15, 0)
+#define BPF_FIXUP_ARENA_REG_MASK GENMASK(20, 16)
+#define BPF_ARENA_ACCESS BIT(21)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
- off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
+ s16 off = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
+ int arena_reg = FIELD_GET(BPF_FIXUP_ARENA_REG_MASK, ex->fixup);
+ bool is_arena = !!(ex->fixup & BPF_ARENA_ACCESS);
+ bool is_write = (dst_reg == DONT_CLEAR);
+ unsigned long addr;
+
+ if (is_arena) {
+ addr = regs->regs[arena_reg] + off;
+ bpf_prog_report_arena_violation(is_write, addr, regs->pc);
+ }
if (dst_reg != DONT_CLEAR)
regs->regs[dst_reg] = 0;
- regs->pc = (unsigned long)&ex->fixup - offset;
+ /* Skip the faulting instruction */
+ regs->pc += AARCH64_INSN_SIZE;
+
return true;
}
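As a worked illustration of the new encoding (register numbers, offset and the resulting
fixup word are invented for the example and are not part of the patch): a load whose
destination is x7, whose 32-bit arena address sits in x1 and whose instruction offset is
-8 packs to fixup = 0x3821fff8, which the handler above takes apart as sketched here,
using the masks defined above and the helpers from <linux/bitfield.h>:

	bool decode_fixup_example(void)
	{
		u32 fixup = 0x3821fff8;	/* FIXUP_REG=7, ARENA_ACC=1, ARENA_REG=1, OFFSET=0xfff8 */

		int dst_reg   = FIELD_GET(BPF_FIXUP_REG_MASK, fixup);		/* 7: zero x7 on fault */
		s16 off       = FIELD_GET(BPF_FIXUP_OFFSET_MASK, fixup);	/* -8 */
		int arena_reg = FIELD_GET(BPF_FIXUP_ARENA_REG_MASK, fixup);	/* 1: 32-bit address in x1 */
		bool is_arena = !!(fixup & BPF_ARENA_ACCESS);			/* arena access */

		/* ex_handler_bpf() would report regs->regs[1] + (-8) as the faulting
		 * arena address, zero x7 and advance regs->pc past the instruction.
		 */
		return is_arena && dst_reg == 7 && arena_reg == 1 && off == -8;	/* true */
	}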
@@ -1088,7 +1122,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
int dst_reg)
{
off_t ins_offset;
- off_t fixup_offset;
+ s16 off = insn->off;
+ bool is_arena;
+ int arena_reg;
unsigned long pc;
struct exception_table_entry *ex;
@@ -1097,11 +1133,16 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
- BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
- BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
- BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32SX &&
+ BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
return 0;
+ is_arena = (BPF_MODE(insn->code) == BPF_PROBE_MEM32) ||
+ (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) ||
+ (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC);
+
if (!ctx->prog->aux->extable ||
WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
return -EINVAL;
@@ -1120,22 +1161,6 @@ static int add_exception_handler(const struct bpf_insn *insn,
return -ERANGE;
/*
- * Since the extable follows the program, the fixup offset is always
- * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
- * to keep things simple, and put the destination register in the upper
- * bits. We don't need to worry about buildtime or runtime sort
- * modifying the upper bits because the table is already sorted, and
- * isn't part of the main exception table.
- *
- * The fixup_offset is set to the next instruction from the instruction
- * that may fault. The execution will jump to this after handling the
- * fault.
- */
- fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
- if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
- return -ERANGE;
-
- /*
* The offsets above have been calculated using the RO buffer but we
* need to use the R/W buffer for writes.
* switch ex to rw buffer for writing.
@@ -1147,8 +1172,26 @@ static int add_exception_handler(const struct bpf_insn *insn,
if (BPF_CLASS(insn->code) != BPF_LDX)
dst_reg = DONT_CLEAR;
- ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
- FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
+ ex->fixup = FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
+
+ if (is_arena) {
+ ex->fixup |= BPF_ARENA_ACCESS;
+ /*
+ * insn->src_reg/dst_reg holds the address in the arena region with upper 32-bits
+ * being zero because of a preceding addr_space_cast(r<n>, 0x0, 0x1) instruction.
+ * This address is adjusted with the addition of arena_vm_start (see the
+ * implementation of BPF_PROBE_MEM32 and BPF_PROBE_ATOMIC) before being used for the
+ * memory access. Pass the reg holding the unmodified 32-bit address to
+ * ex_handler_bpf.
+ */
+ if (BPF_CLASS(insn->code) == BPF_LDX)
+ arena_reg = bpf2a64[insn->src_reg];
+ else
+ arena_reg = bpf2a64[insn->dst_reg];
+
+ ex->fixup |= FIELD_PREP(BPF_FIXUP_OFFSET_MASK, off) |
+ FIELD_PREP(BPF_FIXUP_ARENA_REG_MASK, arena_reg);
+ }
ex->type = EX_TYPE_BPF;
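Concretely, for a hypothetical pair of arena accesses (instructions and offsets invented
for illustration; register numbers follow this file's bpf2a64 mapping, in which BPF R0
lives in x7 and the argument registers R1, R2 in x0, x1), the code above would build:

	/* load  r0 = *(u32 *)(r1 + 16)   (BPF_LDX | BPF_PROBE_MEM32 | BPF_W) */
	ex->fixup = FIELD_PREP(BPF_FIXUP_REG_MASK, 7) |		/* zero x7 on fault */
		    BPF_ARENA_ACCESS |
		    FIELD_PREP(BPF_FIXUP_ARENA_REG_MASK, 0) |	/* address base in x0 */
		    FIELD_PREP(BPF_FIXUP_OFFSET_MASK, 16);

	/* store *(u64 *)(r2 + 8) = r3    (BPF_STX | BPF_PROBE_MEM32 | BPF_DW):
	 * not an LDX, so nothing is cleared and only the fault is reported */
	ex->fixup = FIELD_PREP(BPF_FIXUP_REG_MASK, DONT_CLEAR) |
		    BPF_ARENA_ACCESS |
		    FIELD_PREP(BPF_FIXUP_ARENA_REG_MASK, 1) |	/* address base in x1 */
		    FIELD_PREP(BPF_FIXUP_OFFSET_MASK, 8);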
@@ -1558,7 +1601,13 @@ emit_cond_jmp:
if (ret < 0)
return ret;
emit_call(func_addr, ctx);
- emit(A64_MOV(1, r0, A64_R(0)), ctx);
+ /*
+ * The call to arch_bpf_timed_may_goto() is emitted by the
+ * verifier and uses a custom calling convention: the first
+ * argument and the return value are passed in BPF_REG_AX (x9).
+ */
+ if (func_addr != (u64)arch_bpf_timed_may_goto)
+ emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
}
/* tail call */
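The BPF_JMP | BPF_CALL handling above, restated outside the switch for readability
(same helpers this file already uses; an illustrative rewrite, not a separate kernel
function):

	static void emit_bpf_helper_call(u64 func_addr, struct jit_ctx *ctx)
	{
		const u8 r0 = bpf2a64[BPF_REG_0];	/* BPF R0 lives in x7 */

		emit_call(func_addr, ctx);

		/* Ordinary helpers return their result in x0, which has to be
		 * copied into the BPF R0 mapping. arch_bpf_timed_may_goto()
		 * instead keeps its argument and result in BPF_REG_AX (x9),
		 * so emitting the copy would only clobber x7.
		 */
		if (func_addr != (u64)arch_bpf_timed_may_goto)
			emit(A64_MOV(1, r0, A64_R(0)), ctx);
	}

BPF_REG_AX is the verifier's scratch register, which is presumably what lets the
verifier-injected may_goto check pass its state around without disturbing the
program's live R0-R5 values.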
@@ -1612,7 +1661,11 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
- if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) {
emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
src = tmp2;
}
@@ -1624,7 +1677,8 @@ emit_cond_jmp:
off_adj = off;
}
sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
- BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEM32SX);
switch (BPF_SIZE(code)) {
case BPF_W:
if (is_lsi_offset(off_adj, 2)) {
@@ -1832,9 +1886,11 @@ emit_cond_jmp:
if (ret)
return ret;
- ret = add_exception_handler(insn, ctx, dst);
- if (ret)
- return ret;
+ if (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) {
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
+ }
break;
default:
@@ -2767,7 +2823,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
goto out;
}
- bpf_flush_icache(ro_image, ro_image + size);
out:
kvfree(image);
return ret;
@@ -3038,6 +3093,11 @@ bool bpf_jit_bypass_spec_v4(void)
return true;
}
+bool bpf_jit_supports_timed_may_goto(void)
+{
+ return true;
+}
+
bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {
@@ -3064,8 +3124,7 @@ void bpf_jit_free(struct bpf_prog *prog)
* before freeing it.
*/
if (jit_data) {
- bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
- sizeof(jit_data->header->size));
+ bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
kfree(jit_data);
}
prog->bpf_func -= cfi_get_offset();