author     Alexei Starovoitov <ast@kernel.org>   2026-01-20 20:39:01 -0800
committer  Alexei Starovoitov <ast@kernel.org>   2026-01-20 20:39:01 -0800
commit     ba335bf3a5b8c5f47d56e9be3f96e0989dad5346 (patch)
tree       aa25ec6488ff79e94a1359e771374d0bdcc90e85 /kernel
parent     83c9030cdc45e0518d71065c25201a24eafc9818 (diff)
parent     4fca95095cdcd81bd4a8c8c7008fb3c175a3a5d5 (diff)
Merge branch 'bpf-x86-inline-bpf_get_current_task-for-x86_64'
Menglong Dong says:

====================
bpf, x86: inline bpf_get_current_task() for x86_64

Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
to obtain better performance, and add the testcase for it.

Changes since v5:
* remove unnecessary 'ifdef' and __description in the selftests
* v5: https://lore.kernel.org/bpf/20260119070246.249499-1-dongml2@chinatelecom.cn/

Changes since v4:
* don't support the !CONFIG_SMP case
* v4: https://lore.kernel.org/bpf/20260112104529.224645-1-dongml2@chinatelecom.cn/

Changes since v3:
* handle the !CONFIG_SMP case
* ignore the !CONFIG_SMP case in the testcase, as we enable CONFIG_SMP
  for x86_64 in the selftests

Changes since v2:
* implement it in the verifier with BPF_MOV64_PERCPU_REG() instead of
  in x86_64 JIT (Alexei).

Changes since v1:
* add the testcase
* remove the usage of const_current_task
====================

Link: https://patch.msgid.link/20260120070555.233486-1-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
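The mechanics of the rewrite: BPF_MOV64_PERCPU_REG() converts the address of
a per-cpu variable into that variable's address on the current CPU, so the
three patched instructions in the second hunk below amount to reading the
x86_64 per-cpu current_task pointer. A rough C sketch of the computation
(illustrative only; the function name is made up, and this is kernel-context
pseudocode, not the emitted machine code):

#include <linux/percpu-defs.h>	/* this_cpu_read() */
#include <linux/sched.h>	/* struct task_struct */

static struct task_struct *sketch_inlined_get_current(void)
{
	/* BPF_MOV64_IMM:        r0 = &current_task (per-cpu symbol address) */
	/* BPF_MOV64_PERCPU_REG: r0 = this CPU's address of current_task */
	/* BPF_LDX_MEM(BPF_DW):  r0 = *(u64 *)r0, i.e. the current task */
	return this_cpu_read(current_task);
}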
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/verifier.c  |  22
1 file changed, 22 insertions(+), 0 deletions(-)
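For context, a program of the shape exercised by the new testcase looks like
the snippet below. This is a minimal sketch, not the selftest from this
series; the attach point and names are illustrative. With this patch, on
x86_64 with the JIT enabled and bpf_jit_supports_percpu_insn() returning
true, the verifier rewrites the helper call into the inlined sequence shown
in the diff:

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch, not the selftest from this series. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int trace_sleep(struct pt_regs *ctx)
{
	/* This helper call is patched by the verifier into three
	 * inlined instructions instead of a real function call. */
	struct task_struct *task = bpf_get_current_task_btf();

	/* The _btf variant returns a BTF pointer that tracing
	 * programs may dereference directly. */
	bpf_printk("current pid=%d", task->pid);
	return 0;
}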
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bca0ca82d164..9905ad40f4d3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18130,6 +18130,10 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
 	switch (imm) {
 #ifdef CONFIG_X86_64
 	case BPF_FUNC_get_smp_processor_id:
+#ifdef CONFIG_SMP
+	case BPF_FUNC_get_current_task_btf:
+	case BPF_FUNC_get_current_task:
+#endif
 		return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
 #endif
 	default:
@@ -23715,6 +23719,24 @@ patch_map_ops_generic:
 			insn = new_prog->insnsi + i + delta;
 			goto next_insn;
 		}
+
+		/* Implement bpf_get_current_task() and bpf_get_current_task_btf() inline. */
+		if ((insn->imm == BPF_FUNC_get_current_task || insn->imm == BPF_FUNC_get_current_task_btf) &&
+		    verifier_inlines_helper_call(env, insn->imm)) {
+			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&current_task);
+			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+			cnt = 3;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			goto next_insn;
+		}
 #endif
 		/* Implement bpf_get_func_arg inline. */
 		if (prog_type == BPF_PROG_TYPE_TRACING &&