From f614f2c755b6125c646d680d1c990b3b262bd0a9 Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Sat, 2 Oct 2021 06:47:52 +0530
Subject: tools: Allow specifying base BTF file in resolve_btfids

This commit allows specifying the base BTF for resolving btf id lists/sets during link time in the resolve_btfids tool. The base BTF is set to NULL if no path is passed. This allows resolving BTF ids for module kernel objects.

Also, drop the --no-fail option, as it is only used in case the .BTF_ids section is not present; instead, make no-fail the default mode. The long option name is the same as that of pahole.

Signed-off-by: Kumar Kartikeya Dwivedi
Signed-off-by: Alexei Starovoitov
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20211002011757.311265-5-memxor@gmail.com
---
 tools/testing/selftests/bpf/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'tools/testing')

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index aa94739a1835..5a94d0900d1b 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -454,7 +454,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ - $(Q)$(RESOLVE_BTFIDS) --no-fail --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ + $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ endef
-- cgit v1.2.3

From c48e51c8b07aba8a18125221cb67a40cb1256bf2 Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Sat, 2 Oct 2021 06:47:57 +0530
Subject: bpf: selftests: Add selftests for module kfunc support

This adds selftests that test the success and failure paths for module kfuncs (in the presence of invalid kfunc calls) for both libbpf and gen_loader. It also adds a prog_test kfunc_btf_id_list so that we can add a module BTF ID set from bpf_testmod. This also introduces a couple of test cases to the verifier selftests for validating whether we get an error or not depending on whether an invalid kfunc call remains after elimination of unreachable instructions.
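For illustration only (this snippet is not part of either patch), a minimal sketch of how a module would expose its own kfunc through the prog_test list added here, mirroring the bpf_testmod changes in the diff below; the my_mod_* names are hypothetical:

  BTF_SET_START(my_mod_kfunc_ids)
  BTF_ID(func, my_mod_kfunc)   /* hypothetical kfunc defined by the module */
  BTF_SET_END(my_mod_kfunc_ids)

  static DEFINE_KFUNC_BTF_ID_SET(&my_mod_kfunc_ids, my_mod_kfunc_btf_set);

  static int __init my_mod_init(void)
  {
          /* make the set visible to bpf_prog_test_check_kfunc_call() */
          register_kfunc_btf_id_set(&prog_test_kfunc_list, &my_mod_kfunc_btf_set);
          return 0;
  }

  static void __exit my_mod_exit(void)
  {
          unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &my_mod_kfunc_btf_set);
  }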
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211002011757.311265-10-memxor@gmail.com --- include/linux/btf.h | 1 + kernel/bpf/btf.c | 1 + net/bpf/test_run.c | 5 ++- tools/testing/selftests/bpf/Makefile | 7 ++-- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 23 ++++++++++- .../selftests/bpf/prog_tests/ksyms_module.c | 29 +++++++------- .../selftests/bpf/prog_tests/ksyms_module_libbpf.c | 28 +++++++++++++ .../selftests/bpf/progs/test_ksyms_module.c | 46 ++++++++++++++++------ tools/testing/selftests/bpf/verifier/calls.c | 23 +++++++++++ 9 files changed, 132 insertions(+), 31 deletions(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c (limited to 'tools/testing') diff --git a/include/linux/btf.h b/include/linux/btf.h index 1d56cd2bb362..203eef993d76 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -275,5 +275,6 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, THIS_MODULE } extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; +extern struct kfunc_btf_id_list prog_test_kfunc_list; #endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 1460dff3c154..2ebffb9f57eb 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6397,3 +6397,4 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, EXPORT_SYMBOL_GPL(name) DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); +DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index b1f6f5237de6..529608784aa8 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017 Facebook */ #include +#include #include #include #include @@ -243,7 +244,9 @@ BTF_SET_END(test_sk_kfunc_ids) bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner) { - return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id); + if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id)) + return true; + return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner); } static void *bpf_test_init(const union bpf_attr *kattr, u32 size, diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 5a94d0900d1b..c5c9a9f50d8d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -315,8 +315,9 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ linked_vars.skel.h linked_maps.skel.h LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ - test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c \ - trace_vprintk.c + test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c +# Generate both light skeleton and libbpf skeleton for these +LSKELS_EXTRA := test_ksyms_module.c SKEL_BLACKLIST += $$(LSKELS) test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o @@ -346,7 +347,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS) TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \ $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\ $$(TRUNNER_BPF_SRCS))) -TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS)) +TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS) $$(LSKELS_EXTRA)) TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS)) TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c 
index 50fc5561110a..b892948dc134 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ +#include +#include #include #include #include @@ -13,6 +15,12 @@ DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; +noinline void +bpf_testmod_test_mod_kfunc(int i) +{ + *(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i; +} + noinline int bpf_testmod_loop_test(int n) { int i, sum = 0; @@ -71,13 +79,26 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; +BTF_SET_START(bpf_testmod_kfunc_ids) +BTF_ID(func, bpf_testmod_test_mod_kfunc) +BTF_SET_END(bpf_testmod_kfunc_ids) + +static DEFINE_KFUNC_BTF_ID_SET(&bpf_testmod_kfunc_ids, bpf_testmod_kfunc_btf_set); + static int bpf_testmod_init(void) { - return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + int ret; + + ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + if (ret) + return ret; + register_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); + return 0; } static void bpf_testmod_exit(void) { + unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c index 2cd5cded543f..831447878d7b 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -2,30 +2,29 @@ /* Copyright (c) 2021 Facebook */ #include -#include -#include +#include #include "test_ksyms_module.lskel.h" -static int duration; - void test_ksyms_module(void) { - struct test_ksyms_module* skel; + struct test_ksyms_module *skel; + int retval; int err; - skel = test_ksyms_module__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!env.has_testmod) { + test__skip(); return; + } - err = test_ksyms_module__attach(skel); - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + skel = test_ksyms_module__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open_and_load")) + return; + err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, (__u32 *)&retval, NULL); + if (!ASSERT_OK(err, "bpf_prog_test_run")) goto cleanup; - - usleep(1); - - ASSERT_EQ(skel->bss->triggered, true, "triggered"); - ASSERT_EQ(skel->bss->out_mod_ksym_global, 123, "global_ksym_val"); - + ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); cleanup: test_ksyms_module__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c new file mode 100644 index 000000000000..e6343ef63af9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "test_ksyms_module.skel.h" + +void test_ksyms_module_libbpf(void) +{ + struct test_ksyms_module *skel; + int retval, err; + + if (!env.has_testmod) { + test__skip(); + return; + } + + skel = test_ksyms_module__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) + return; + err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, + sizeof(pkt_v4), NULL, NULL, (__u32 
*)&retval, NULL); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); +cleanup: + test_ksyms_module__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_module.c b/tools/testing/selftests/bpf/progs/test_ksyms_module.c index d6a0b3086b90..0650d918c096 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_module.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_module.c @@ -2,24 +2,48 @@ /* Copyright (c) 2021 Facebook */ #include "vmlinux.h" - #include +#define X_0(x) +#define X_1(x) x X_0(x) +#define X_2(x) x X_1(x) +#define X_3(x) x X_2(x) +#define X_4(x) x X_3(x) +#define X_5(x) x X_4(x) +#define X_6(x) x X_5(x) +#define X_7(x) x X_6(x) +#define X_8(x) x X_7(x) +#define X_9(x) x X_8(x) +#define X_10(x) x X_9(x) +#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y) + extern const int bpf_testmod_ksym_percpu __ksym; +extern void bpf_testmod_test_mod_kfunc(int i) __ksym; +extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak; -int out_mod_ksym_global = 0; -bool triggered = false; +int out_bpf_testmod_ksym = 0; +const volatile int x = 0; -SEC("raw_tp/sys_enter") -int handler(const void *ctx) +SEC("tc") +int load(struct __sk_buff *skb) { - int *val; - __u32 cpu; - - val = (int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu); - out_mod_ksym_global = *val; - triggered = true; + /* This will be kept by clang, but removed by verifier. Since it is + * marked as __weak, libbpf and gen_loader don't error out if BTF ID + * is not found for it, instead imm and off is set to 0 for it. + */ + if (x) + bpf_testmod_invalid_mod_kfunc(); + bpf_testmod_test_mod_kfunc(42); + out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu); + return 0; +} +SEC("tc") +int load_256(struct __sk_buff *skb) +{ + /* this will fail if kfunc doesn't reuse its own btf fd index */ + REPEAT_256(bpf_testmod_test_mod_kfunc(42);); + bpf_testmod_test_mod_kfunc(42); return 0; } diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 336a749673d1..d7b74eb28333 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -1,3 +1,26 @@ +{ + "calls: invalid kfunc call not eliminated", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = REJECT, + .errstr = "invalid kernel function call not eliminated in verifier pass", +}, +{ + "calls: invalid kfunc call unreachable", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, { "calls: basic sanity", .insns = { -- cgit v1.2.3 From 57a610f1c58fa493315e1c24eef6d992cdf4c4a9 Mon Sep 17 00:00:00 2001 From: Jie Meng Date: Fri, 1 Oct 2021 20:56:26 -0700 Subject: bpf, x64: Save bytes for DIV by reducing reg copies Instead of unconditionally performing push/pop on %rax/%rdx in case of division/modulo, we can save a few bytes in case of destination register being either BPF r0 (%rax) or r3 (%rdx) since the result is written in there anyway. Also, we do not need to copy the source to %r11 unless the source is either %rax, %rdx or an immediate. 
For example, before the patch: 22: push %rax 23: push %rdx 24: mov %rsi,%r11 27: xor %edx,%edx 29: div %r11 2c: mov %rax,%r11 2f: pop %rdx 30: pop %rax 31: mov %r11,%rax After: 22: push %rdx 23: xor %edx,%edx 25: div %rsi 28: pop %rdx Signed-off-by: Jie Meng Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Tested-by: Daniel Borkmann Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20211002035626.2041910-1-jmeng@fb.com --- arch/x86/net/bpf_jit_comp.c | 71 ++++++++++++++++++------------ tools/testing/selftests/bpf/verifier/jit.c | 47 ++++++++++++++++++++ 2 files changed, 89 insertions(+), 29 deletions(-) (limited to 'tools/testing') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 576ef1a6954a..5a0edea3cc2e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1028,19 +1028,30 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_K: - case BPF_ALU64 | BPF_DIV | BPF_K: - EMIT1(0x50); /* push rax */ - EMIT1(0x52); /* push rdx */ - - if (BPF_SRC(insn->code) == BPF_X) - /* mov r11, src_reg */ - EMIT_mov(AUX_REG, src_reg); - else + case BPF_ALU64 | BPF_DIV | BPF_K: { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; + + if (dst_reg != BPF_REG_0) + EMIT1(0x50); /* push rax */ + if (dst_reg != BPF_REG_3) + EMIT1(0x52); /* push rdx */ + + if (BPF_SRC(insn->code) == BPF_X) { + if (src_reg == BPF_REG_0 || + src_reg == BPF_REG_3) { + /* mov r11, src_reg */ + EMIT_mov(AUX_REG, src_reg); + src_reg = AUX_REG; + } + } else { /* mov r11, imm32 */ EMIT3_off32(0x49, 0xC7, 0xC3, imm32); + src_reg = AUX_REG; + } - /* mov rax, dst_reg */ - EMIT_mov(BPF_REG_0, dst_reg); + if (dst_reg != BPF_REG_0) + /* mov rax, dst_reg */ + emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); /* * xor edx, edx @@ -1048,26 +1059,28 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, */ EMIT2(0x31, 0xd2); - if (BPF_CLASS(insn->code) == BPF_ALU64) - /* div r11 */ - EMIT3(0x49, 0xF7, 0xF3); - else - /* div r11d */ - EMIT3(0x41, 0xF7, 0xF3); - - if (BPF_OP(insn->code) == BPF_MOD) - /* mov r11, rdx */ - EMIT3(0x49, 0x89, 0xD3); - else - /* mov r11, rax */ - EMIT3(0x49, 0x89, 0xC3); - - EMIT1(0x5A); /* pop rdx */ - EMIT1(0x58); /* pop rax */ - - /* mov dst_reg, r11 */ - EMIT_mov(dst_reg, AUX_REG); + if (is64) + EMIT1(add_1mod(0x48, src_reg)); + else if (is_ereg(src_reg)) + EMIT1(add_1mod(0x40, src_reg)); + /* div src_reg */ + EMIT2(0xF7, add_1reg(0xF0, src_reg)); + + if (BPF_OP(insn->code) == BPF_MOD && + dst_reg != BPF_REG_3) + /* mov dst_reg, rdx */ + emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); + else if (BPF_OP(insn->code) == BPF_DIV && + dst_reg != BPF_REG_0) + /* mov dst_reg, rax */ + emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); + + if (dst_reg != BPF_REG_3) + EMIT1(0x5A); /* pop rdx */ + if (dst_reg != BPF_REG_0) + EMIT1(0x58); /* pop rax */ break; + } case BPF_ALU | BPF_MUL | BPF_K: case BPF_ALU64 | BPF_MUL | BPF_K: diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c index eedcb752bf70..79021c30e51e 100644 --- a/tools/testing/selftests/bpf/verifier/jit.c +++ b/tools/testing/selftests/bpf/verifier/jit.c @@ -102,6 +102,53 @@ .result = ACCEPT, .retval = 2, }, +{ + "jit: various div tests", + .insns = { + BPF_LD_IMM64(BPF_REG_2, 0xefeffeULL), + BPF_LD_IMM64(BPF_REG_0, 0xeeff0d413122ULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + 
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xeeff0d413122ULL), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_3, 0xfefeeeULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_2, 0xaa93ULL), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_1, 0xbeefULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_LD_IMM64(BPF_REG_3, 0xbeefULL), + BPF_ALU64_REG(BPF_MOD, BPF_REG_1, BPF_REG_3), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_2, 0x5ee1dULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_LD_IMM64(BPF_REG_3, 0x2bULL), + BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_3), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, { "jit: jsgt, jslt", .insns = { -- cgit v1.2.3 From c65eb8082d4cb02ef87bdecedce8969d5ccbea54 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 5 Oct 2021 22:11:06 -0700 Subject: selftests/bpf: Refactor btf_write selftest to reuse BTF generation logic Next patch will need to reuse BTF generation logic, which tests every supported BTF kind, for testing btf__add_btf() APIs. So restructure existing selftests and make it as a single subtest that uses bulk VALIDATE_RAW_BTF() macro for raw BTF dump checking. 
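As a rough outline (paraphrasing the diff below rather than quoting it), the restructured test ends up shaped like this, with gen_btf() adding one type of every supported kind and a single subtest checking the raw dump in bulk:

  static void gen_btf(struct btf *btf)
  {
          /* btf__add_int(), btf__add_ptr(), btf__add_struct(), ... with
           * per-type ASSERT_*() checks, one type for each supported kind
           */
  }

  static void test_btf_add(void)
  {
          struct btf *btf = btf__new_empty();

          if (!ASSERT_OK_PTR(btf, "new_empty"))
                  return;
          gen_btf(btf);
          VALIDATE_RAW_BTF(btf,
                           "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
                           /* ... one expectation string per generated type ... */
                           "[19] TAG 'tag2' type_id=14 component_idx=1");
          btf__free(btf);
  }

  void test_btf_write(void)
  {
          if (test__start_subtest("btf_add"))
                  test_btf_add();
  }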
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211006051107.17921-3-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/btf_write.c | 55 +++++++++++++++++++--- 1 file changed, 49 insertions(+), 6 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c index 76548eecce2c..aa4505618252 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_write.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -4,19 +4,15 @@ #include #include "btf_helpers.h" -void test_btf_write() { +static void gen_btf(struct btf *btf) +{ const struct btf_var_secinfo *vi; const struct btf_type *t; const struct btf_member *m; const struct btf_enum *v; const struct btf_param *p; - struct btf *btf; int id, err, str_off; - btf = btf__new_empty(); - if (!ASSERT_OK_PTR(btf, "new_empty")) - return; - str_off = btf__find_str(btf, "int"); ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off"); @@ -301,6 +297,53 @@ void test_btf_write() { ASSERT_EQ(btf_tag(t)->component_idx, 1, "tag_component_idx"); ASSERT_STREQ(btf_type_raw_dump(btf, 19), "[19] TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); +} + +static void test_btf_add() +{ + struct btf *btf; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "new_empty")) + return; + + gen_btf(btf); + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] TAG 'tag1' type_id=16 component_idx=-1", + "[19] TAG 'tag2' type_id=14 component_idx=1"); btf__free(btf); } + +void test_btf_write() +{ + if (test__start_subtest("btf_add")) + test_btf_add(); +} -- cgit v1.2.3 From 9d05787223913171fce20a737ba54e3b6e7da13c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 5 Oct 2021 22:11:07 -0700 Subject: selftests/bpf: Test new btf__add_btf() API Add a test that validates that btf__add_btf() API is correctly copying all the types from the source BTF into destination BTF object and adjusts type IDs and string offsets properly. 
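As a hedged usage sketch (not taken from the patch itself) of the API under test: btf__add_btf() appends every type from a source BTF object to a destination object, remapping type IDs and string offsets, and returns the ID assigned to the first appended type:

  struct btf *dst = btf__new_empty();
  struct btf *src = btf__new_empty();
  int first_id;

  /* ... add types to both objects ... */

  first_id = btf__add_btf(dst, src);   /* negative value on error */
  /* types copied from src now start at first_id in dst */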
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20211006051107.17921-4-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/btf_write.c | 86 ++++++++++++++++++++++ 1 file changed, 86 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c index aa4505618252..886e0fc1efb1 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_write.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -342,8 +342,94 @@ static void test_btf_add() btf__free(btf); } +static void test_btf_add_btf() +{ + struct btf *btf1 = NULL, *btf2 = NULL; + int id; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "btf1")) + return; + + btf2 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf2, "btf2")) + goto cleanup; + + gen_btf(btf1); + gen_btf(btf2); + + id = btf__add_btf(btf1, btf2); + if (!ASSERT_EQ(id, 20, "id")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] TAG 'tag1' type_id=16 component_idx=-1", + "[19] TAG 'tag2' type_id=14 component_idx=1", + + /* types appended from the second BTF */ + "[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[21] PTR '(anon)' type_id=20", + "[22] CONST '(anon)' type_id=24", + "[23] VOLATILE '(anon)' type_id=22", + "[24] RESTRICT '(anon)' type_id=23", + "[25] ARRAY '(anon)' type_id=21 index_type_id=20 nr_elems=10", + "[26] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=20 bits_offset=0\n" + "\t'f2' type_id=20 bits_offset=32 bitfield_size=16", + "[27] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=20 bits_offset=0 bitfield_size=16", + "[28] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[29] FWD 'struct_fwd' fwd_kind=struct", + "[30] FWD 'union_fwd' fwd_kind=union", + "[31] ENUM 'enum_fwd' size=4 vlen=0", + "[32] TYPEDEF 'typedef1' type_id=20", + "[33] FUNC 'func1' type_id=34 linkage=global", + "[34] FUNC_PROTO '(anon)' ret_type_id=20 vlen=2\n" + "\t'p1' type_id=20\n" + "\t'p2' type_id=21", + "[35] VAR 'var1' type_id=20, linkage=global-alloc", + "[36] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=20 offset=4 size=8", + "[37] TAG 'tag1' type_id=35 component_idx=-1", + "[38] TAG 'tag2' type_id=33 component_idx=1"); + +cleanup: + btf__free(btf1); + btf__free(btf2); +} + void test_btf_write() { if (test__start_subtest("btf_add")) test_btf_add(); + if (test__start_subtest("btf_add_btf")) + test_btf_add_btf(); } -- cgit v1.2.3 From 189c83bdde850e1fc8bb347f813cdd8776ce7abf Mon Sep 17 00:00:00 2001 From: 
Jiri Olsa
Date: Sun, 3 Oct 2021 18:49:25 +0200
Subject: selftest/bpf: Switch recursion test to use htab_map_delete_elem

Currently the recursion test hooks the __htab_map_lookup_elem function, which is invoked both from bpf_prog and from the bpf syscall. But in our kernel build, __htab_map_lookup_elem gets inlined into htab_map_lookup_elem, so it's not triggered and the test fails.

Fix this by using htab_map_delete_elem, which is not inlined for bpf_prog calls (like htab_map_lookup_elem is) and is used directly as the pointer for map_delete_elem, so it won't disappear due to inlining.

Signed-off-by: Jiri Olsa
Signed-off-by: Andrii Nakryiko
Acked-by: Song Liu
Link: https://lore.kernel.org/bpf/YVnfFTL/3T6jOwHI@krava
---
 tools/testing/selftests/bpf/prog_tests/recursion.c | 10 +++++-----
 tools/testing/selftests/bpf/progs/recursion.c | 9 +++------
 2 files changed, 8 insertions(+), 11 deletions(-)
(limited to 'tools/testing')

diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c index 0e378d63fe18..f3af2627b599 100644 --- a/tools/testing/selftests/bpf/prog_tests/recursion.c +++ b/tools/testing/selftests/bpf/prog_tests/recursion.c @@ -20,18 +20,18 @@ void test_recursion(void) goto out; ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2"); ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2"); - err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_lookup), + err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete), &prog_info, &prog_info_len); if (!ASSERT_OK(err, "get_prog_info")) goto out;

diff --git a/tools/testing/selftests/bpf/progs/recursion.c b/tools/testing/selftests/bpf/progs/recursion.c index 49f679375b9d..3c2423bb19e2 100644 --- a/tools/testing/selftests/bpf/progs/recursion.c +++ b/tools/testing/selftests/bpf/progs/recursion.c @@ -24,8 +24,8 @@ struct { int pass1 = 0; int pass2 = 0; -SEC("fentry/__htab_map_lookup_elem") -int BPF_PROG(on_lookup, struct bpf_map *map) +SEC("fentry/htab_map_delete_elem") +int BPF_PROG(on_delete, struct bpf_map *map) { int key = 0; @@ -35,10 +35,7 @@ int BPF_PROG(on_lookup, struct bpf_map *map) } if (map == (void *)&hash2) { pass2++; - /* htab_map_gen_lookup() will inline below call * into direct call to __htab_map_lookup_elem() */ - bpf_map_lookup_elem(&hash2, &key); + bpf_map_delete_elem(&hash2, &key); return 0; }
-- cgit v1.2.3

From 6f2b219b62a4376ca2da15c503de79d0650c8155 Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Mon, 4 Oct 2021 00:58:44 +0800
Subject: selftests/bpf: Switch to new bpf_object__next_{map,program} APIs

Replace the deprecated bpf_{map,program}__next APIs with the newly added bpf_object__next_{map,program} APIs, so that no compilation warnings are emitted.
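For reference (an illustration, not part of the patch), the mechanical substitution applied throughout the diff below looks like this; note that the argument order is swapped:

  struct bpf_program *prog;
  struct bpf_map *map;

  /* deprecated iterators */
  prog = bpf_program__next(NULL, obj);
  map = bpf_map__next(NULL, obj);

  /* replacements with the same iteration semantics */
  prog = bpf_object__next_program(obj, NULL);
  map = bpf_object__next_map(obj, NULL);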
Signed-off-by: Hengqi Chen Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211003165844.4054931-3-hengqi.chen@gmail.com --- samples/bpf/xdp1_user.c | 2 +- samples/bpf/xdp_sample_pkts_user.c | 2 +- tools/bpf/bpftool/iter.c | 2 +- tools/bpf/bpftool/prog.c | 2 +- tools/testing/selftests/bpf/prog_tests/btf.c | 2 +- tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/select_reuseport.c | 2 +- tools/testing/selftests/bpf/prog_tests/tcp_rtt.c | 2 +- tools/testing/selftests/bpf/xdping.c | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) (limited to 'tools/testing') diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 116e39f6b666..8675fa5273df 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -128,7 +128,7 @@ int main(int argc, char **argv) if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) return 1; - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (!map) { printf("finding a map in obj file failed\n"); return 1; diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c index 495e09897bd3..f4382ccdcbb1 100644 --- a/samples/bpf/xdp_sample_pkts_user.c +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -154,7 +154,7 @@ int main(int argc, char **argv) return 1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (!map) { printf("finding a map in obj file failed\n"); return 1; diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c index 84a9b01d956d..6c0de647b8ad 100644 --- a/tools/bpf/bpftool/iter.c +++ b/tools/bpf/bpftool/iter.c @@ -57,7 +57,7 @@ static int do_pin(int argc, char **argv) goto close_obj; } - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (!prog) { p_err("can't find bpf program in objfile %s", objfile); goto close_obj; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 9c3e343b7d87..a24ea7e26aa4 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1601,7 +1601,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) goto err_close_obj; if (first_prog_only) { - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (!prog) { p_err("object file doesn't contain any bpf program"); goto err_close_obj; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 9c85d7d27409..acd33d0cd5d9 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4511,7 +4511,7 @@ static void do_test_file(unsigned int test_num) if (CHECK(err, "obj: %d", err)) return; - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (CHECK(!prog, "Cannot find bpf_prog")) { err = -1; goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index c7c1816899bf..2839f4270a26 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -285,7 +285,7 @@ static void test_fmod_ret_freplace(void) if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open")) goto out; - prog = bpf_program__next(NULL, freplace_obj); + prog = bpf_object__next_program(freplace_obj, NULL); err = bpf_program__set_attach_target(prog, pkt_fd, NULL); ASSERT_OK(err, "freplace__set_attach_target"); @@ -302,7 +302,7 @@ static void 
test_fmod_ret_freplace(void) goto out; attach_prog_fd = bpf_program__fd(prog); - prog = bpf_program__next(NULL, fmod_obj); + prog = bpf_object__next_program(fmod_obj, NULL); err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL); ASSERT_OK(err, "fmod_ret_set_attach_target"); @@ -352,7 +352,7 @@ static void test_obj_load_failure_common(const char *obj_file, if (!ASSERT_OK_PTR(obj, "obj_open")) goto close_prog; - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); err = bpf_program__set_attach_target(prog, pkt_fd, NULL); ASSERT_OK(err, "set_attach_target"); diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 4efd337d6a3c..d40e9156c48d 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -114,7 +114,7 @@ static int prepare_bpf_obj(void) err = bpf_object__load(obj); RET_ERR(err, "load bpf_object", "err:%d\n", err); - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); RET_ERR(!prog, "get first bpf_program", "!prog\n"); select_by_skb_data_prog = bpf_program__fd(prog); RET_ERR(select_by_skb_data_prog < 0, "get prog fd", diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index d207e968e6b1..265b4fe33ec3 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -109,7 +109,7 @@ static int run_test(int cgroup_fd, int server_fd) return -1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); map_fd = bpf_map__fd(map); err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c index 79a3453dab25..30f12637f4e4 100644 --- a/tools/testing/selftests/bpf/xdping.c +++ b/tools/testing/selftests/bpf/xdping.c @@ -187,7 +187,7 @@ int main(int argc, char **argv) return 1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (map) map_fd = bpf_map__fd(map); if (!map || map_fd < 0) { -- cgit v1.2.3 From aa67fdb4643616f04cb59b6d090010c371ab1a80 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 6 Oct 2021 22:02:31 -0700 Subject: selftests/bpf: Skip the second half of get_branch_snapshot in vm VMs running on upstream 5.12+ kernel support LBR. However, bpf_get_branch_snapshot couldn't stop the LBR before too many entries are flushed. Skip the hit/waste test for VMs before we find a proper fix for LBR in VM. 
Fixes: 025bd7c753aa ("selftests/bpf: Add test for bpf_get_branch_snapshot") Signed-off-by: Song Liu Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007050231.728496-1-songliubraving@fb.com --- .../selftests/bpf/prog_tests/get_branch_snapshot.c | 34 ++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c index 67e86f8d8677..e4f92feb7b32 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -6,6 +6,30 @@ static int *pfd_array; static int cpu_cnt; +static bool is_hypervisor(void) +{ + char *line = NULL; + bool ret = false; + size_t len; + FILE *fp; + + fp = fopen("/proc/cpuinfo", "r"); + if (!fp) + return false; + + while (getline(&line, &len, fp) != -1) { + if (!strncmp(line, "flags", 5)) { + if (strstr(line, "hypervisor") != NULL) + ret = true; + break; + } + } + + free(line); + fclose(fp); + return ret; +} + static int create_perf_events(void) { struct perf_event_attr attr = {0}; @@ -83,6 +107,16 @@ void test_get_branch_snapshot(void) goto cleanup; } + if (is_hypervisor()) { + /* As of today, LBR in hypervisor cannot be stopped before + * too many entries are flushed. Skip the hit/waste test + * for now in hypervisor until we optimize the LBR in + * hypervisor. + */ + test__skip(); + goto cleanup; + } + ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); /* Given we stop LBR in software, we will waste a few entries. -- cgit v1.2.3 From dd65acf72d0e073970459d5da80573a04304aaa9 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Thu, 7 Oct 2021 16:12:34 -0700 Subject: selftests/bpf: Remove SEC("version") from test progs Since commit 6c4fc209fcf9d ("bpf: remove useless version check for prog load") these "version" sections, which result in bpf_attr.kern_version being set, have been unnecessary. Remove them so that it's obvious to folks using selftests as a guide that "modern" BPF progs don't need this section. 
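For illustration (not taken from the patch), the boilerplate being removed from the test programs, sketched as a before/after:

  /* before: the "version" section populated bpf_attr.kern_version at load time */
  char _license[] SEC("license") = "GPL";
  int _version SEC("version") = 1;

  /* after: only the license section is still required, since the kernel
   * version check was dropped from program loading
   */
  char _license[] SEC("license") = "GPL";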
Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007231234.2223081-1-davemarchevsky@fb.com --- tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c | 1 - tools/testing/selftests/bpf/progs/connect4_prog.c | 2 -- tools/testing/selftests/bpf/progs/connect6_prog.c | 2 -- tools/testing/selftests/bpf/progs/connect_force_port4.c | 1 - tools/testing/selftests/bpf/progs/connect_force_port6.c | 1 - tools/testing/selftests/bpf/progs/dev_cgroup.c | 1 - tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c | 1 - tools/testing/selftests/bpf/progs/map_ptr_kern.c | 1 - tools/testing/selftests/bpf/progs/netcnt_prog.c | 1 - tools/testing/selftests/bpf/progs/sendmsg4_prog.c | 2 -- tools/testing/selftests/bpf/progs/sendmsg6_prog.c | 2 -- tools/testing/selftests/bpf/progs/sockmap_parse_prog.c | 2 -- tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c | 2 -- tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c | 2 -- tools/testing/selftests/bpf/progs/sockopt_inherit.c | 1 - tools/testing/selftests/bpf/progs/tcp_rtt.c | 1 - tools/testing/selftests/bpf/progs/test_btf_haskv.c | 2 -- tools/testing/selftests/bpf/progs/test_btf_newkv.c | 2 -- tools/testing/selftests/bpf/progs/test_btf_nokv.c | 2 -- tools/testing/selftests/bpf/progs/test_l4lb.c | 2 -- tools/testing/selftests/bpf/progs/test_map_in_map.c | 1 - tools/testing/selftests/bpf/progs/test_pinning.c | 2 -- tools/testing/selftests/bpf/progs/test_pinning_invalid.c | 2 -- tools/testing/selftests/bpf/progs/test_pkt_access.c | 1 - tools/testing/selftests/bpf/progs/test_queue_stack_map.h | 2 -- tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c | 2 -- tools/testing/selftests/bpf/progs/test_sk_lookup.c | 1 - tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c | 2 -- tools/testing/selftests/bpf/progs/test_skb_ctx.c | 1 - tools/testing/selftests/bpf/progs/test_sockmap_kern.h | 1 - tools/testing/selftests/bpf/progs/test_sockmap_listen.c | 1 - tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c | 1 - tools/testing/selftests/bpf/progs/test_tcp_estats.c | 1 - tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c | 1 - tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c | 2 -- tools/testing/selftests/bpf/progs/test_tracepoint.c | 1 - tools/testing/selftests/bpf/progs/test_tunnel_kern.c | 2 -- tools/testing/selftests/bpf/progs/test_xdp.c | 2 -- tools/testing/selftests/bpf/progs/test_xdp_loop.c | 2 -- tools/testing/selftests/bpf/progs/test_xdp_redirect.c | 2 -- 40 files changed, 61 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c index 3f757e30d7a0..88638315c582 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c @@ -14,7 +14,6 @@ #include #include -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; __u16 g_serv_port = 0; diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index a943d394fd3a..b241932911db 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -31,8 +31,6 @@ #define IFNAMSIZ 16 #endif -int _version SEC("version") = 1; - __attribute__ ((noinline)) int do_bind(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/connect6_prog.c 
b/tools/testing/selftests/bpf/progs/connect6_prog.c index 506d0f81a375..40266d2c737c 100644 --- a/tools/testing/selftests/bpf/progs/connect6_prog.c +++ b/tools/testing/selftests/bpf/progs/connect6_prog.c @@ -24,8 +24,6 @@ #define DST_REWRITE_PORT6 6666 -int _version SEC("version") = 1; - SEC("cgroup/connect6") int connect_v6_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c index a979aaef2a76..27a632dd382e 100644 --- a/tools/testing/selftests/bpf/progs/connect_force_port4.c +++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c @@ -13,7 +13,6 @@ #include char _license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; struct svc_addr { __be32 addr; diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c index afc8f1c5a9d6..19cad93e612f 100644 --- a/tools/testing/selftests/bpf/progs/connect_force_port6.c +++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c @@ -12,7 +12,6 @@ #include char _license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; struct svc_addr { __be32 addr[4]; diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c index 8924e06bdef0..79b54a4fa244 100644 --- a/tools/testing/selftests/bpf/progs/dev_cgroup.c +++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c @@ -57,4 +57,3 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c index 6b42db2fe391..68587b1de34e 100644 --- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c @@ -37,4 +37,3 @@ int trace(void *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index d1d304c980f0..b1b711d9b214 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -683,5 +683,4 @@ int cg_skb(void *ctx) return 1; } -__u32 _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index 43649bce4c54..f718b2c212dc 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -68,4 +68,3 @@ int bpf_nextcnt(struct __sk_buff *skb) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c index ac5abc34cde8..ea75a44cb7fc 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c @@ -18,8 +18,6 @@ #define DST_PORT 4040 #define DST_REWRITE_PORT4 4444 -int _version SEC("version") = 1; - SEC("cgroup/sendmsg4") int sendmsg_v4_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c index 24694b1a8d82..bf9b46b806f6 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c +++ 
b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c @@ -22,8 +22,6 @@ #define DST_REWRITE_PORT6 6666 -int _version SEC("version") = 1; - SEC("cgroup/sendmsg6") int sendmsg_v6_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c index ca283af80d4e..95d5b941bc1f 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c @@ -2,8 +2,6 @@ #include #include -int _version SEC("version") = 1; - SEC("sk_skb1") int bpf_prog1(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c index eeaf6e75c9a2..80632954c5a1 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - SEC("sk_msg1") int bpf_prog1(struct sk_msg_md *msg) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index 73872c535cbb..e2468a6d01a5 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -2,8 +2,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 20); diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index c6d428a8d785..9fb241b97291 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -3,7 +3,6 @@ #include char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; #define SOL_CUSTOM 0xdeadbeef #define CUSTOM_INHERIT1 0 diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c index 0cb3204ddb18..0988d79f1587 100644 --- a/tools/testing/selftests/bpf/progs/tcp_rtt.c +++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c @@ -3,7 +3,6 @@ #include char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; struct tcp_rtt_storage { __u32 invoked; diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c index 31538c9ed193..160ead6c67b2 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c @@ -4,8 +4,6 @@ #include #include "bpf_legacy.h" -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c index 6c5560162746..1884a5bd10f5 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c @@ -4,8 +4,6 @@ #include #include "bpf_legacy.h" -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c index 506da7fd2da2..15e0f9945fe4 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git 
a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c index 33493911d87a..04fee08863cb 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb.c @@ -21,8 +21,6 @@ #include "test_iptunnel_common.h" #include -int _version SEC("version") = 1; - static inline __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> ((-shift) & 31)); diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c index a6d91932dcd5..f416032ba858 100644 --- a/tools/testing/selftests/bpf/progs/test_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c @@ -47,5 +47,4 @@ int xdp_mimtest0(struct xdp_md *ctx) return XDP_PASS; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c index 4ef2630292b2..0facea6cbbae 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning.c +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c index 5412e0c732c7..2a56db1094b8 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c +++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 3cfd88141ddc..0558544e1ff0 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -15,7 +15,6 @@ #include #define barrier() __asm__ __volatile__("": : :"memory") -int _version SEC("version") = 1; /* llvm will optimize both subprograms into exactly the same BPF assembly * diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h index 4dd9806ad73b..0fcd3ff0e38a 100644 --- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h +++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h @@ -8,8 +8,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, MAP_TYPE); __uint(max_entries, 32); diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 0f9bc258225e..7d56ed47cd4d 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -15,8 +15,6 @@ #include #include "test_select_reuseport_common.h" -int _version SEC("version") = 1; - #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index 48534d810391..19d2465d9442 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -644,4 +644,3 @@ int multi_prog_redir2(struct bpf_sk_lookup *ctx) } char _license[] SEC("license") = "Dual BSD/GPL"; -__u32 _version SEC("version") = 1; diff --git 
a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c index 552f2090665c..c304cd5b8cad 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c @@ -42,6 +42,4 @@ int log_cgroup_id(struct __sk_buff *skb) return TC_ACT_OK; } -int _version SEC("version") = 1; - char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index ba4dab09d19c..1d61b36e6067 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -3,7 +3,6 @@ #include #include -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; SEC("skb_ctx") diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 1858435de7aa..2966564b8497 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -361,5 +361,4 @@ int bpf_prog10(struct sk_msg_md *msg) return SK_DROP; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c index 00f1456aaeda..325c9f193432 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c @@ -116,5 +116,4 @@ int prog_reuseport(struct sk_reuseport_md *reuse) return verdict; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c index 7449fdb1763b..36a707e7c7a7 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c @@ -73,4 +73,3 @@ int oncpu(struct random_urandom_args *args) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index adc83a54c352..2c5c602c6011 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -255,4 +255,3 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 94f50f7e94d6..3ded05280757 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -16,7 +16,6 @@ #include "test_tcpbpf.h" struct tcpbpf_globals global = {}; -int _version SEC("version") = 1; /** * SOL_TCP is defined in while diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c index 24e9344994ef..540181c115a8 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c @@ -28,8 +28,6 @@ struct { __type(value, __u32); } perf_event_map SEC(".maps"); -int _version SEC("version") = 1; - SEC("sockops") 
int bpf_testcb(struct bpf_sock_ops *skops) { diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c index 4b825ee122cf..ce6974016f53 100644 --- a/tools/testing/selftests/bpf/progs/test_tracepoint.c +++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c @@ -23,4 +23,3 @@ int oncpu(struct sched_switch_args *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index e7b673117436..ef0dde83b85a 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -26,8 +26,6 @@ bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \ } while (0) -int _version SEC("version") = 1; - struct geneve_opt { __be16 opt_class; __u8 type; diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index e6aa2fc6ce6b..d7a9a74b7245 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -20,8 +20,6 @@ #include #include "test_iptunnel_common.h" -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 256); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c index 27eb52dda92c..c98fb44156f0 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -16,8 +16,6 @@ #include #include "test_iptunnel_common.h" -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 256); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c index a5337cd9400b..b778cad45485 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c @@ -12,8 +12,6 @@ #include #include -int _version SEC("version") = 1; - SEC("redirect_to_111") int xdp_redirect_to_111(struct xdp_md *xdp) { -- cgit v1.2.3 From 7e3cbd3405cb7b6c036b8984baa694bc55c08e46 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Fri, 8 Oct 2021 10:31:39 -0700 Subject: selftests/bpf: Fix btf_dump test under new clang New clang version changed ([0]) type name in dwarf from "long int" to "long", this is causing btf_dump tests to fail. 
[0] https://github.com/llvm/llvm-project/commit/f6a561c4d6754b13165a49990e8365d819f64c86 Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211008173139.1457407-1-fallentree@fb.com --- .../testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c | 10 +++++----- tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c | 4 ++-- tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c | 2 +- tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c index 8f44767a75fa..e5560a656030 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c @@ -11,7 +11,7 @@ /* *struct bitfields_only_mixed_types { * int a: 3; - * long int b: 2; + * long b: 2; * _Bool c: 1; * enum { * A = 0, @@ -27,7 +27,7 @@ struct bitfields_only_mixed_types { int a: 3; - long int b: 2; + long b: 2; bool c: 1; /* it's really a _Bool type */ enum { A, /* A = 0, dumper is very explicit */ @@ -44,8 +44,8 @@ struct bitfields_only_mixed_types { * char: 4; * int a: 4; * short b; - * long int c; - * long int d: 8; + * long c; + * long d: 8; * int e; * int f; *}; @@ -71,7 +71,7 @@ struct bitfield_mixed_with_others { *struct bitfield_flushed { * int a: 4; * long: 60; - * long int b: 16; + * long b: 16; *}; * */ diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c index 1cef3bec1dc7..e304b6204bd9 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c @@ -29,7 +29,7 @@ struct non_packed_fields { struct nested_packed { char: 4; int a: 4; - long int b; + long b; struct { char c; int d; @@ -44,7 +44,7 @@ union union_is_never_packed { union union_does_not_need_packing { struct { - long int a; + long a; int b; } __attribute__((packed)); int c; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c index 35c512818a56..f2661c8d2d90 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -9,7 +9,7 @@ /* ----- START-EXPECTED-OUTPUT ----- */ struct padded_implicitly { int a; - long int b; + long b; char c; }; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c index 8aaa24a00322..1c7105fcae3c 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -189,7 +189,7 @@ struct struct_with_embedded_stuff { const char *d; } e; union { - volatile long int f; + volatile long f; void * restrict g; }; }; -- cgit v1.2.3 From f012ade10b34c461663bc3dd957636be06804b0d Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 7 Oct 2021 20:44:29 +0100 Subject: bpftool: Install libbpf headers instead of including the dir Bpftool relies on libbpf, therefore it relies on a number of headers from the library and must be linked against the library. The Makefile for bpftool exposes these objects by adding tools/lib as an include directory ("-I$(srctree)/tools/lib"). 
This is a working solution, but this is not the cleanest one. The risk is to involuntarily include objects that are not intended to be exposed by the libbpf. The headers needed to compile bpftool should in fact be "installed" from libbpf, with its "install_headers" Makefile target. In addition, there is one header which is internal to the library and not supposed to be used by external applications, but that bpftool uses anyway. Adjust the Makefile in order to install the header files properly before compiling bpftool. Also copy the additional internal header file (nlattr.h), but call it out explicitly. Build (and install headers) in a subdirectory under bpftool/ instead of tools/lib/bpf/. When descending from a parent Makefile, this is configurable by setting the OUTPUT, LIBBPF_OUTPUT and LIBBPF_DESTDIR variables. Also adjust the Makefile for BPF selftests, so as to reuse the (host) libbpf compiled earlier and to avoid compiling a separate version of the library just for bpftool. Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007194438.34443-4-quentin@isovalent.com --- tools/bpf/bpftool/Makefile | 33 +++++++++++++++++++++------------ tools/testing/selftests/bpf/Makefile | 2 ++ 2 files changed, 23 insertions(+), 12 deletions(-) (limited to 'tools/testing') diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 1fcf5b01a193..ba02d71c39ef 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -17,19 +17,23 @@ endif BPF_DIR = $(srctree)/tools/lib/bpf/ ifneq ($(OUTPUT),) - LIBBPF_OUTPUT = $(OUTPUT)/libbpf/ - LIBBPF_PATH = $(LIBBPF_OUTPUT) - BOOTSTRAP_OUTPUT = $(OUTPUT)/bootstrap/ + _OUTPUT := $(OUTPUT) else - LIBBPF_OUTPUT = - LIBBPF_PATH = $(BPF_DIR) - BOOTSTRAP_OUTPUT = $(CURDIR)/bootstrap/ + _OUTPUT := $(CURDIR) endif +BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/ +LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/ +LIBBPF_DESTDIR := $(LIBBPF_OUTPUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include -LIBBPF = $(LIBBPF_PATH)libbpf.a +LIBBPF = $(LIBBPF_OUTPUT)libbpf.a LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/ LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a +# We need to copy nlattr.h which is not otherwise exported by libbpf, but still +# required by bpftool. +LIBBPF_INTERNAL_HDRS := nlattr.h + ifeq ($(BPFTOOL_VERSION),) BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion) endif @@ -38,7 +42,13 @@ $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT): $(QUIET_MKDIR)mkdir -p $@ $(LIBBPF): FORCE | $(LIBBPF_OUTPUT) - $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a + $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= $(LIBBPF) install_headers + +$(LIBBPF_INCLUDE)/bpf/$(LIBBPF_INTERNAL_HDRS): \ + $(addprefix $(BPF_DIR),$(LIBBPF_INTERNAL_HDRS)) $(LIBBPF) + $(call QUIET_INSTALL, bpf/$(notdir $@)) + $(Q)install -m 644 -t $(LIBBPF_INCLUDE)/bpf/ $(BPF_DIR)$(notdir $@) $(LIBBPF_BOOTSTRAP): FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT) $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \ @@ -60,10 +70,10 @@ CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS)) CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ -I$(if $(OUTPUT),$(OUTPUT),.) 
\ + -I$(LIBBPF_INCLUDE) \ -I$(srctree)/kernel/bpf/ \ -I$(srctree)/tools/include \ -I$(srctree)/tools/include/uapi \ - -I$(srctree)/tools/lib \ -I$(srctree)/tools/perf CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"' ifneq ($(EXTRA_CFLAGS),) @@ -140,7 +150,7 @@ BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o g $(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o -$(OBJS): $(LIBBPF) +$(OBJS): $(LIBBPF) $(LIBBPF_INCLUDE)/bpf/$(LIBBPF_INTERNAL_HDRS) VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ @@ -167,8 +177,7 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) $(QUIET_CLANG)$(CLANG) \ -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/tools/include/uapi/ \ - -I$(LIBBPF_PATH) \ - -I$(srctree)/tools/lib \ + -I$(LIBBPF_INCLUDE) \ -g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c5c9a9f50d8d..849a4637f59d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -209,6 +209,8 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ CC=$(HOSTCC) LD=$(HOSTLD) \ EXTRA_CFLAGS='-g -O0' \ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ + LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ + LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install all: docs -- cgit v1.2.3 From 1478994aad82810d833bf9c816fb4e9845553e9b Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 7 Oct 2021 20:44:30 +0100 Subject: tools/resolve_btfids: Install libbpf headers when building API headers from libbpf should not be accessed directly from the library's source directory. Instead, they should be exported with "make install_headers". Let's make sure that resolve_btfids installs the headers properly when building. When descending from a parent Makefile, the specific output directories for building the library and exporting the headers are configurable with LIBBPF_OUT and LIBBPF_DESTDIR, respectively. This is in addition to OUTPUT, on top of which those variables are constructed by default. Also adjust the Makefile for the BPF selftests in order to point to the (target) libbpf shared with other tools, instead of building a version specific to resolve_btfids. Remove libbpf's order-only dependencies on the include directories (they are created by libbpf and don't need to exist beforehand). 
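As a rough sketch of the default standalone flow (the output path below is only an example):

  # libbpf is built under $(OUTPUT)/libbpf/ and its headers are exported to
  # $(OUTPUT)/libbpf/include/ before resolve_btfids itself is compiled.
  make -C tools/bpf/resolve_btfids OUTPUT=/tmp/resolve_btfids/

A parent Makefile that has already built libbpf can instead pass BPFOBJ and LIBBPF_INCLUDE, which is what the selftests Makefile below now does.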
Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007194438.34443-5-quentin@isovalent.com --- tools/bpf/resolve_btfids/Makefile | 16 +++++++++++----- tools/bpf/resolve_btfids/main.c | 4 ++-- tools/testing/selftests/bpf/Makefile | 7 +++++-- 3 files changed, 18 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index 08b75e314ae7..751643f860b2 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -29,25 +29,30 @@ BPFOBJ := $(OUTPUT)/libbpf/libbpf.a LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/ SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a +LIBBPF_DESTDIR := $(LIBBPF_OUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include + BINARY := $(OUTPUT)/resolve_btfids BINARY_IN := $(BINARY)-in.o all: $(BINARY) -$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd: +$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT): $(call msg,MKDIR,,$@) $(Q)mkdir -p $(@) $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) -$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf - $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) $(abspath $@) +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT) + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= \ + $(abspath $@) install_headers CFLAGS := -g \ -I$(srctree)/tools/include \ -I$(srctree)/tools/include/uapi \ - -I$(LIBBPF_SRC) \ + -I$(LIBBPF_INCLUDE) \ -I$(SUBCMD_SRC) LIBS = -lelf -lz @@ -65,7 +70,8 @@ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) clean_objects := $(wildcard $(OUTPUT)/*.o \ $(OUTPUT)/.*.o.cmd \ $(OUTPUT)/.*.o.d \ - $(OUTPUT)/libbpf \ + $(LIBBPF_OUT) \ + $(LIBBPF_DESTDIR) \ $(OUTPUT)/libsubcmd \ $(OUTPUT)/resolve_btfids) diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index c6c3e613858a..716e6ad1864b 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -60,8 +60,8 @@ #include #include #include -#include -#include +#include +#include #include #define BTF_IDS_SECTION ".BTF_ids" diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 849a4637f59d..090f424ac5e1 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -122,9 +122,11 @@ BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a ifneq ($(CROSS_COMPILE),) HOST_BUILD_DIR := $(BUILD_DIR)/host HOST_SCRATCH_DIR := $(OUTPUT)/host-tools +HOST_INCLUDE_DIR := $(HOST_SCRATCH_DIR)/include else HOST_BUILD_DIR := $(BUILD_DIR) HOST_SCRATCH_DIR := $(SCRATCH_DIR) +HOST_INCLUDE_DIR := $(INCLUDE_DIR) endif HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids @@ -227,7 +229,7 @@ docs-clean: $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ../../../include/uapi/linux/bpf.h \ - | $(INCLUDE_DIR) $(BUILD_DIR)/libbpf + | $(BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ EXTRA_CFLAGS='-g -O0' \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers @@ -235,7 +237,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ifneq ($(BPFOBJ),$(HOST_BPFOBJ)) $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ../../../include/uapi/linux/bpf.h \ - | $(INCLUDE_DIR) $(HOST_BUILD_DIR)/libbpf + | 
$(HOST_BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \ EXTRA_CFLAGS='-g -O0' \ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \ @@ -260,6 +262,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ $(TOOLSDIR)/lib/str_error_r.c $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) \ + LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) # Get Clang's default includes on this system, as opposed to those seen by -- cgit v1.2.3 From be79505caf3f99a2f9cca5946261085b333f7034 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 7 Oct 2021 20:44:31 +0100 Subject: tools/runqslower: Install libbpf headers when building API headers from libbpf should not be accessed directly from the library's source directory. Instead, they should be exported with "make install_headers". Let's make sure that runqslower installs the headers properly when building. We use a libbpf_hdrs target to mark the logical dependency on libbpf's headers export for a number of object files, even though the headers should have been exported at this time (since bpftool needs them, and is required to generate the skeleton or the vmlinux.h). When descending from a parent Makefile, the specific output directories for building the library and exporting the headers are configurable with BPFOBJ_OUTPUT and BPF_DESTDIR, respectively. This is in addition to OUTPUT, on top of which those variables are constructed by default. Also adjust the Makefile for the BPF selftests. We pass a number of variables to the "make" invocation, because we want to point runqslower to the (target) libbpf shared with other tools, instead of building its own version. In addition, runqslower relies on (target) bpftool, and we also want to pass the proper variables to its Makefile so that bpftool itself reuses the same libbpf. 
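As a rough sketch of the default standalone flow (the output path below is only an example):

  # libbpf is built under $(OUTPUT)libbpf/ and its headers are installed to
  # $(OUTPUT)libbpf/include; the runqslower objects depend on libbpf_hdrs and
  # bpftool depends on the libbpf archive, so both are built only after the
  # headers are in place.
  make -C tools/bpf/runqslower OUTPUT=/tmp/runqslower/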
Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007194438.34443-6-quentin@isovalent.com --- tools/bpf/runqslower/Makefile | 22 +++++++++++++--------- tools/testing/selftests/bpf/Makefile | 15 +++++++++------ 2 files changed, 22 insertions(+), 15 deletions(-) (limited to 'tools/testing') diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index 3818ec511fd2..bbd1150578f7 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -9,9 +9,9 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL) LIBBPF_SRC := $(abspath ../../lib/bpf) BPFOBJ_OUTPUT := $(OUTPUT)libbpf/ BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a -BPF_INCLUDE := $(BPFOBJ_OUTPUT) -INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \ - -I$(abspath ../../include/uapi) +BPF_DESTDIR := $(BPFOBJ_OUTPUT) +BPF_INCLUDE := $(BPF_DESTDIR)/include +INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) CFLAGS := -g -Wall # Try to detect best kernel BTF source @@ -33,7 +33,7 @@ endif .DELETE_ON_ERROR: -.PHONY: all clean runqslower +.PHONY: all clean runqslower libbpf_hdrs all: runqslower runqslower: $(OUTPUT)/runqslower @@ -46,13 +46,15 @@ clean: $(Q)$(RM) $(OUTPUT)runqslower $(Q)$(RM) -r .output +libbpf_hdrs: $(BPFOBJ) + $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ - $(OUTPUT)/runqslower.bpf.o + $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs -$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h +$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs $(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL) $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@ @@ -81,8 +83,10 @@ else endif $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT) - $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@ + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \ + DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers -$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT) +$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \ - CC=$(HOSTCC) LD=$(HOSTLD) + LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \ + LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 090f424ac5e1..e023d734f7b0 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -130,6 +130,7 @@ HOST_INCLUDE_DIR := $(INCLUDE_DIR) endif HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids +RUNQSLOWER_OUTPUT := $(BUILD_DIR)/runqslower/ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ @@ -154,7 +155,7 @@ $(notdir $(TEST_GEN_PROGS) \ # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ $(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \ - $(INCLUDE_DIR)) + $(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR)) $(MAKE_DIRS): $(call msg,MKDIR,,$@) $(Q)mkdir -p $@ @@ -183,11 +184,13 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool -$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) - $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ - OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \ - 
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ - cp $(SCRATCH_DIR)/runqslower $@ +$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ + OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \ + BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \ + BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \ + BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ + cp $(RUNQSLOWER_OUTPUT)runqslower $@ TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) -- cgit v1.2.3 From 87ee33bfdd4f74edc1548c7f0140800cfcc33039 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 7 Oct 2021 20:44:37 +0100 Subject: selftests/bpf: Better clean up for runqslower in test_bpftool_build.sh The script test_bpftool_build.sh attempts to build bpftool in the various supported ways, to make sure nothing breaks. One of those ways is to run "make tools/bpf" from the root of the kernel repository. This command builds bpftool, along with the other tools under tools/bpf, and runqslower in particular. After running the command and upon a successful bpftool build, the script attempts to cleanup the generated objects. However, after building with this target and in the case of runqslower, the files are not cleaned up as expected. This is because the "tools/bpf" target sets $(OUTPUT) to .../tools/bpf/runqslower/ when building the tool, causing the object files to be placed directly under the runqslower directory. But when running "cd tools/bpf; make clean", the value for $(OUTPUT) is set to ".output" (relative to the runqslower directory) by runqslower's Makefile, and this is where the Makefile looks for files to clean up. We cannot easily fix in the root Makefile (where "tools/bpf" is defined) or in tools/scripts/Makefile.include (setting $(OUTPUT)), where changing the way the output variables are passed would likely have consequences elsewhere. We could change runqslower's Makefile to build in the repository instead of in a dedicated ".output/", but doing so just to accommodate a test script doesn't sound great. Instead, let's just make sure that we clean up runqslower properly by adding the correct command to the script. This will attempt to clean runqslower twice: the first try with command "cd tools/bpf; make clean" will search for tools/bpf/runqslower/.output and fail to clean it (but will still clean the other tools, in particular bpftool), the second one (added in this commit) sets the $(OUTPUT) variable like for building with the "tool/bpf" target and should succeed. Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007194438.34443-12-quentin@isovalent.com --- tools/testing/selftests/bpf/test_bpftool_build.sh | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh index b03a87571592..1453a53ed547 100755 --- a/tools/testing/selftests/bpf/test_bpftool_build.sh +++ b/tools/testing/selftests/bpf/test_bpftool_build.sh @@ -90,6 +90,10 @@ echo -e "... through kbuild\n" if [ -f ".config" ] ; then make_and_clean tools/bpf + ## "make tools/bpf" sets $(OUTPUT) to ...tools/bpf/runqslower for + ## runqslower, but the default (used for the "clean" target) is .output. + ## Let's make sure we clean runqslower's directory properly. 
+ make -C tools/bpf/runqslower OUTPUT=${KDIR_ROOT_DIR}/tools/bpf/runqslower/ clean ## $OUTPUT is overwritten in kbuild Makefile, and thus cannot be passed ## down from toplevel Makefile to bpftool's Makefile. -- cgit v1.2.3 From d7db0a4e8d95101ebb545444578ba7085c270e5f Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 7 Oct 2021 20:44:38 +0100 Subject: bpftool: Add install-bin target to install binary only With "make install", bpftool installs its binary and its bash completion file. Usually, this is what we want. But a few components in the kernel repository (namely, BPF iterators and selftests) also install bpftool locally before using it. In such a case, bash completion is not necessary and is just a useless build artifact. Let's add an "install-bin" target to bpftool, to offer a way to install the binary only. Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211007194438.34443-13-quentin@isovalent.com --- kernel/bpf/preload/iterators/Makefile | 2 +- tools/bpf/bpftool/Makefile | 6 ++++-- tools/testing/selftests/bpf/Makefile | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile index a4aedc7b0728..b8bd60511227 100644 --- a/kernel/bpf/preload/iterators/Makefile +++ b/kernel/bpf/preload/iterators/Makefile @@ -66,4 +66,4 @@ $(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) OUTPUT=$(BPFTOOL_OUTPUT)/ \ LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \ LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/ \ - prefix= DESTDIR=$(abs_out)/ install + prefix= DESTDIR=$(abs_out)/ install-bin diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index ba02d71c39ef..9c2d13c513f0 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -226,10 +226,12 @@ clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool $(Q)$(RM) -r -- $(OUTPUT)feature/ -install: $(OUTPUT)bpftool +install-bin: $(OUTPUT)bpftool $(call QUIET_INSTALL, bpftool) $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin $(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool + +install: install-bin $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir) $(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir) @@ -256,6 +258,6 @@ zdep: @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi .SECONDARY: -.PHONY: all FORCE clean install uninstall zdep +.PHONY: all FORCE clean install-bin install uninstall zdep .PHONY: doc doc-clean doc-install doc-uninstall .DEFAULT_GOAL := all diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e023d734f7b0..498222543c37 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -216,7 +216,7 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ - prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install + prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin all: docs -- cgit v1.2.3 From fa7f17d066bd0996b930b664aa0ed1f213fc1828 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Mon, 4 Oct 2021 17:48:57 +0800 Subject: bpf/selftests: Add test for writable bare tracepoint Add a writable bare tracepoint in bpf_testmod module, and trigger its calling when reading /sys/kernel/bpf_testmod with a specific buffer length. 
The reading will return the value in writable context if the early return flag is enabled in writable context. Signed-off-by: Hou Tao Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211004094857.30868-4-hotforest@gmail.com --- .../selftests/bpf/bpf_testmod/bpf_testmod-events.h | 15 ++++++++++ .../selftests/bpf/bpf_testmod/bpf_testmod.c | 10 +++++++ .../selftests/bpf/bpf_testmod/bpf_testmod.h | 5 ++++ .../selftests/bpf/prog_tests/module_attach.c | 35 ++++++++++++++++++++++ .../selftests/bpf/progs/test_module_attach.c | 14 +++++++++ tools/testing/selftests/bpf/test_progs.c | 4 +-- tools/testing/selftests/bpf/test_progs.h | 2 ++ 7 files changed, 83 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 89c6d58e5dd6..11ee801e75e7 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -34,6 +34,21 @@ DECLARE_TRACE(bpf_testmod_test_write_bare, TP_ARGS(task, ctx) ); +#undef BPF_TESTMOD_DECLARE_TRACE +#ifdef DECLARE_TRACE_WRITABLE +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE_WRITABLE(call, PARAMS(proto), PARAMS(args), size) +#else +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#endif + +BPF_TESTMOD_DECLARE_TRACE(bpf_testmod_test_writable_bare, + TP_PROTO(struct bpf_testmod_test_writable_ctx *ctx), + TP_ARGS(ctx), + sizeof(struct bpf_testmod_test_writable_ctx) +); + #endif /* _BPF_TESTMOD_EVENTS_H */ #undef TRACE_INCLUDE_PATH diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index b892948dc134..5d52ea2768df 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -50,6 +50,16 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, if (bpf_testmod_loop_test(101) > 100) trace_bpf_testmod_test_read(current, &ctx); + /* Magic number to enable writable tp */ + if (len == 64) { + struct bpf_testmod_test_writable_ctx writable = { + .val = 1024, + }; + trace_bpf_testmod_test_writable_bare(&writable); + if (writable.early_ret) + return snprintf(buf, len, "%d\n", writable.val); + } + return -EIO; /* always fail */ } EXPORT_SYMBOL(bpf_testmod_test_read); diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index b3892dc40111..0d71e2607832 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -17,4 +17,9 @@ struct bpf_testmod_test_write_ctx { size_t len; }; +struct bpf_testmod_test_writable_ctx { + bool early_ret; + int val; +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index 1797a6e4d6d8..6d0e50dcf47c 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -2,10 +2,36 @@ /* Copyright (c) 2020 Facebook */ #include +#include #include "test_module_attach.skel.h" static int duration; +static int trigger_module_test_writable(int *val) +{ + int fd, err; + char buf[65]; + ssize_t rd; + + fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY); + err = -errno; + if 
(!ASSERT_GE(fd, 0, "testmode_file_open")) + return err; + + rd = read(fd, buf, sizeof(buf) - 1); + err = -errno; + if (!ASSERT_GT(rd, 0, "testmod_file_rd_val")) { + close(fd); + return err; + } + + buf[rd] = '\0'; + *val = strtol(buf, NULL, 0); + close(fd); + + return 0; +} + static int delete_module(const char *name, int flags) { return syscall(__NR_delete_module, name, flags); @@ -19,6 +45,7 @@ void test_module_attach(void) struct test_module_attach__bss *bss; struct bpf_link *link; int err; + int writable_val = 0; skel = test_module_attach__open(); if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -51,6 +78,14 @@ void test_module_attach(void) ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet"); ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret"); + bss->raw_tp_writable_bare_early_ret = true; + bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4; + ASSERT_OK(trigger_module_test_writable(&writable_val), + "trigger_writable"); + ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in"); + ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val, + "writable_test_out"); + test_module_attach__detach(skel); /* attach fentry/fexit and make sure it get's module reference */ diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index bd37ceec5587..b36857093f71 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -27,6 +27,20 @@ int BPF_PROG(handle_raw_tp_bare, return 0; } +int raw_tp_writable_bare_in_val = 0; +int raw_tp_writable_bare_early_ret = 0; +int raw_tp_writable_bare_out_val = 0; + +SEC("raw_tp.w/bpf_testmod_test_writable_bare") +int BPF_PROG(handle_raw_tp_writable_bare, + struct bpf_testmod_test_writable_ctx *writable) +{ + raw_tp_writable_bare_in_val = writable->val; + writable->early_ret = raw_tp_writable_bare_early_ret; + writable->val = raw_tp_writable_bare_out_val; + return 0; +} + __u32 tp_btf_read_sz = 0; SEC("tp_btf/bpf_testmod_test_read") diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 2ed01f615d20..007b4ff85fea 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -747,7 +747,7 @@ int trigger_module_test_read(int read_sz) { int fd, err; - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); + fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY); err = -errno; if (!ASSERT_GE(fd, 0, "testmod_file_open")) return err; @@ -769,7 +769,7 @@ int trigger_module_test_write(int write_sz) memset(buf, 'a', write_sz); buf[write_sz-1] = '\0'; - fd = open("/sys/kernel/bpf_testmod", O_WRONLY); + fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY); err = -errno; if (!ASSERT_GE(fd, 0, "testmod_file_open")) { free(buf); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 94bef0aa74cf..9b8a1810b700 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -301,3 +301,5 @@ int trigger_module_test_write(int write_sz); #else #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep" #endif + +#define BPF_TESTMOD_TEST_FILE "/sys/kernel/bpf_testmod" -- cgit v1.2.3 From 91b2c0afd00cb01d715d6e9503624ab33580d5b6 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:06 -0700 Subject: selftests/bpf: Add parallelism to test_progs This patch adds "-j" mode to test_progs, executing tests in multiple process. 
"-j" mode is optional, and works with all existing test selection mechanism, as well as "-v", "-l" etc. In "-j" mode, main process use UDS/SEQPACKET to communicate to each forked worker, commanding it to run tests and collect logs. After all tests are finished, a summary is printed. main process use multiple competing threads to dispatch work to worker, trying to keep them all busy. The test status will be printed as soon as it is finished, if there are error logs, it will be printed after the final summary line. By specifying "--debug", additional debug information on server/worker communication will be printed. Example output: > ./test_progs -n 15-20 -j [ 12.801730] bpf_testmod: loading out-of-tree module taints kernel. Launching 8 workers. #20 btf_split:OK #16 btf_endian:OK #18 btf_module:OK #17 btf_map_in_map:OK #19 btf_skc_cls_ingress:OK #15 btf_dump:OK Summary: 6/20 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-2-fallentree@fb.com --- tools/testing/selftests/bpf/test_progs.c | 611 +++++++++++++++++++++++++++++-- tools/testing/selftests/bpf/test_progs.h | 36 +- 2 files changed, 610 insertions(+), 37 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 007b4ff85fea..20dd1e2f8d06 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -12,6 +12,11 @@ #include #include /* backtrace */ #include +#include /* get_nprocs */ +#include +#include +#include +#include /* Adapted from perf/util/string.c */ static bool glob_match(const char *str, const char *pat) @@ -48,6 +53,8 @@ struct prog_test_def { bool force_log; int error_cnt; int skip_cnt; + int sub_succ_cnt; + bool should_run; bool tested; bool need_cgroup_cleanup; @@ -97,6 +104,10 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) if (stdout == env.stdout) return; + /* worker always holds log */ + if (env.worker_id != -1) + return; + fflush(stdout); /* exports env.log_buf & env.log_cnt */ if (env.verbosity > VERBOSE_NONE || test->force_log || failed) { @@ -107,8 +118,6 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) fprintf(env.stdout, "\n"); } } - - fseeko(stdout, 0, SEEK_SET); /* rewind */ } static void skip_account(void) @@ -124,7 +133,8 @@ static void stdio_restore(void); /* A bunch of tests set custom affinity per-thread and/or per-process. Reset * it after each test/sub-test. */ -static void reset_affinity() { +static void reset_affinity(void) +{ cpu_set_t cpuset; int i, err; @@ -165,21 +175,21 @@ static void restore_netns(void) } } -void test__end_subtest() +void test__end_subtest(void) { struct prog_test_def *test = env.test; int sub_error_cnt = test->error_cnt - test->old_error_cnt; dump_test_log(test, sub_error_cnt); - fprintf(env.stdout, "#%d/%d %s/%s:%s\n", + fprintf(stdout, "#%d/%d %s/%s:%s\n", test->test_num, test->subtest_num, test->test_name, test->subtest_name, sub_error_cnt ? "FAIL" : (test->skip_cnt ? 
"SKIP" : "OK")); if (sub_error_cnt) - env.fail_cnt++; + test->error_cnt++; else if (test->skip_cnt == 0) - env.sub_succ_cnt++; + test->sub_succ_cnt++; skip_account(); free(test->subtest_name); @@ -217,7 +227,8 @@ bool test__start_subtest(const char *name) return true; } -void test__force_log() { +void test__force_log(void) +{ env.test->force_log = true; } @@ -474,6 +485,8 @@ enum ARG_KEYS { ARG_LIST_TEST_NAMES = 'l', ARG_TEST_NAME_GLOB_ALLOWLIST = 'a', ARG_TEST_NAME_GLOB_DENYLIST = 'd', + ARG_NUM_WORKERS = 'j', + ARG_DEBUG = -1, }; static const struct argp_option opts[] = { @@ -495,6 +508,10 @@ static const struct argp_option opts[] = { "Run tests with name matching the pattern (supports '*' wildcard)." }, { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0, "Don't run tests with name matching the pattern (supports '*' wildcard)." }, + { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL, + "Number of workers to run in parallel, default to number of cpus." }, + { "debug", ARG_DEBUG, NULL, 0, + "print extra debug information for test_progs." }, {}, }; @@ -650,7 +667,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) fprintf(stderr, "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)", errno); - return -1; + return -EINVAL; } } @@ -661,6 +678,20 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case ARG_LIST_TEST_NAMES: env->list_test_names = true; break; + case ARG_NUM_WORKERS: + if (arg) { + env->workers = atoi(arg); + if (!env->workers) { + fprintf(stderr, "Invalid number of worker: %s.", arg); + return -EINVAL; + } + } else { + env->workers = get_nprocs(); + } + break; + case ARG_DEBUG: + env->debug = true; + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -678,7 +709,7 @@ static void stdio_hijack(void) env.stdout = stdout; env.stderr = stderr; - if (env.verbosity > VERBOSE_NONE) { + if (env.verbosity > VERBOSE_NONE && env.worker_id == -1) { /* nothing to do, output to stdout by default */ return; } @@ -704,10 +735,6 @@ static void stdio_restore(void) return; fclose(stdout); - free(env.log_buf); - - env.log_buf = NULL; - env.log_cnt = 0; stdout = env.stdout; stderr = env.stderr; @@ -794,11 +821,461 @@ void crash_handler(int signum) dump_test_log(env.test, true); if (env.stdout) stdio_restore(); - + if (env.worker_id != -1) + fprintf(stderr, "[%d]: ", env.worker_id); fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum); backtrace_symbols_fd(bt, sz, STDERR_FILENO); } +static void sigint_handler(int signum) +{ + int i; + + for (i = 0; i < env.workers; i++) + if (env.worker_socks[i] > 0) + close(env.worker_socks[i]); +} + +static int current_test_idx; +static pthread_mutex_t current_test_lock; +static pthread_mutex_t stdout_output_lock; + +struct test_result { + int error_cnt; + int skip_cnt; + int sub_succ_cnt; + + size_t log_cnt; + char *log_buf; +}; + +static struct test_result test_results[ARRAY_SIZE(prog_test_defs)]; + +static inline const char *str_msg(const struct msg *msg, char *buf) +{ + switch (msg->type) { + case MSG_DO_TEST: + sprintf(buf, "MSG_DO_TEST %d", msg->do_test.test_num); + break; + case MSG_TEST_DONE: + sprintf(buf, "MSG_TEST_DONE %d (log: %d)", + msg->test_done.test_num, + msg->test_done.have_log); + break; + case MSG_TEST_LOG: + sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)", + strlen(msg->test_log.log_buf), + msg->test_log.is_last); + break; + case MSG_EXIT: + sprintf(buf, "MSG_EXIT"); + break; + default: + sprintf(buf, "UNKNOWN"); + break; + } + + return buf; +} + +static int send_message(int 
sock, const struct msg *msg) +{ + char buf[256]; + + if (env.debug) + fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf)); + return send(sock, msg, sizeof(*msg), 0); +} + +static int recv_message(int sock, struct msg *msg) +{ + int ret; + char buf[256]; + + memset(msg, 0, sizeof(*msg)); + ret = recv(sock, msg, sizeof(*msg), 0); + if (ret >= 0) { + if (env.debug) + fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf)); + } + return ret; +} + +static void run_one_test(int test_num) +{ + struct prog_test_def *test = &prog_test_defs[test_num]; + + env.test = test; + + test->run_test(); + + /* ensure last sub-test is finalized properly */ + if (test->subtest_name) + test__end_subtest(); + + test->tested = true; + + dump_test_log(test, test->error_cnt); + + reset_affinity(); + restore_netns(); + if (test->need_cgroup_cleanup) + cleanup_cgroup_environment(); +} + +struct dispatch_data { + int worker_id; + int sock_fd; +}; + +static void *dispatch_thread(void *ctx) +{ + struct dispatch_data *data = ctx; + int sock_fd; + FILE *log_fd = NULL; + + sock_fd = data->sock_fd; + + while (true) { + int test_to_run = -1; + struct prog_test_def *test; + struct test_result *result; + + /* grab a test */ + { + pthread_mutex_lock(¤t_test_lock); + + if (current_test_idx >= prog_test_cnt) { + pthread_mutex_unlock(¤t_test_lock); + goto done; + } + + test = &prog_test_defs[current_test_idx]; + test_to_run = current_test_idx; + current_test_idx++; + + pthread_mutex_unlock(¤t_test_lock); + } + + if (!test->should_run) + continue; + + /* run test through worker */ + { + struct msg msg_do_test; + + msg_do_test.type = MSG_DO_TEST; + msg_do_test.do_test.test_num = test_to_run; + if (send_message(sock_fd, &msg_do_test) < 0) { + perror("Fail to send command"); + goto done; + } + env.worker_current_test[data->worker_id] = test_to_run; + } + + /* wait for test done */ + { + int err; + struct msg msg_test_done; + + err = recv_message(sock_fd, &msg_test_done); + if (err < 0) + goto error; + if (msg_test_done.type != MSG_TEST_DONE) + goto error; + if (test_to_run != msg_test_done.test_done.test_num) + goto error; + + test->tested = true; + result = &test_results[test_to_run]; + + result->error_cnt = msg_test_done.test_done.error_cnt; + result->skip_cnt = msg_test_done.test_done.skip_cnt; + result->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt; + + /* collect all logs */ + if (msg_test_done.test_done.have_log) { + log_fd = open_memstream(&result->log_buf, &result->log_cnt); + if (!log_fd) + goto error; + + while (true) { + struct msg msg_log; + + if (recv_message(sock_fd, &msg_log) < 0) + goto error; + if (msg_log.type != MSG_TEST_LOG) + goto error; + + fprintf(log_fd, "%s", msg_log.test_log.log_buf); + if (msg_log.test_log.is_last) + break; + } + fclose(log_fd); + log_fd = NULL; + } + /* output log */ + { + pthread_mutex_lock(&stdout_output_lock); + + if (result->log_cnt) { + result->log_buf[result->log_cnt] = '\0'; + fprintf(stdout, "%s", result->log_buf); + if (result->log_buf[result->log_cnt - 1] != '\n') + fprintf(stdout, "\n"); + } + + fprintf(stdout, "#%d %s:%s\n", + test->test_num, test->test_name, + result->error_cnt ? "FAIL" : (result->skip_cnt ? 
"SKIP" : "OK")); + + pthread_mutex_unlock(&stdout_output_lock); + } + + } /* wait for test done */ + } /* while (true) */ +error: + if (env.debug) + fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno)); + + if (log_fd) + fclose(log_fd); +done: + { + struct msg msg_exit; + + msg_exit.type = MSG_EXIT; + if (send_message(sock_fd, &msg_exit) < 0) { + if (env.debug) + fprintf(stderr, "[%d]: send_message msg_exit: %s.\n", + data->worker_id, strerror(errno)); + } + } + return NULL; +} + +static void print_all_error_logs(void) +{ + int i; + + if (env.fail_cnt) + fprintf(stdout, "\nAll error logs:\n"); + + /* print error logs again */ + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test; + struct test_result *result; + + test = &prog_test_defs[i]; + result = &test_results[i]; + + if (!test->tested || !result->error_cnt) + continue; + + fprintf(stdout, "\n#%d %s:%s\n", + test->test_num, test->test_name, + result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK")); + + if (result->log_cnt) { + result->log_buf[result->log_cnt] = '\0'; + fprintf(stdout, "%s", result->log_buf); + if (result->log_buf[result->log_cnt - 1] != '\n') + fprintf(stdout, "\n"); + } + } +} + +static int server_main(void) +{ + pthread_t *dispatcher_threads; + struct dispatch_data *data; + struct sigaction sigact_int = { + .sa_handler = sigint_handler, + .sa_flags = SA_RESETHAND, + }; + int i; + + sigaction(SIGINT, &sigact_int, NULL); + + dispatcher_threads = calloc(sizeof(pthread_t), env.workers); + data = calloc(sizeof(struct dispatch_data), env.workers); + + env.worker_current_test = calloc(sizeof(int), env.workers); + for (i = 0; i < env.workers; i++) { + int rc; + + data[i].worker_id = i; + data[i].sock_fd = env.worker_socks[i]; + rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]); + if (rc < 0) { + perror("Failed to launch dispatcher thread"); + exit(EXIT_ERR_SETUP_INFRA); + } + } + + /* wait for all dispatcher to finish */ + for (i = 0; i < env.workers; i++) { + while (true) { + int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL); + + if (!ret) { + break; + } else if (ret == EBUSY) { + if (env.debug) + fprintf(stderr, "Still waiting for thread %d (test %d).\n", + i, env.worker_current_test[i] + 1); + usleep(1000 * 1000); + continue; + } else { + fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret); + break; + } + } + } + free(dispatcher_threads); + free(env.worker_current_test); + free(data); + + /* generate summary */ + fflush(stderr); + fflush(stdout); + + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *current_test; + struct test_result *result; + + current_test = &prog_test_defs[i]; + result = &test_results[i]; + + if (!current_test->tested) + continue; + + env.succ_cnt += result->error_cnt ? 
0 : 1; + env.skip_cnt += result->skip_cnt; + if (result->error_cnt) + env.fail_cnt++; + env.sub_succ_cnt += result->sub_succ_cnt; + } + + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", + env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); + + print_all_error_logs(); + + /* reap all workers */ + for (i = 0; i < env.workers; i++) { + int wstatus, pid; + + pid = waitpid(env.worker_pids[i], &wstatus, 0); + if (pid != env.worker_pids[i]) + perror("Unable to reap worker"); + } + + return 0; +} + +static int worker_main(int sock) +{ + save_netns(); + + while (true) { + /* receive command */ + struct msg msg; + + if (recv_message(sock, &msg) < 0) + goto out; + + switch (msg.type) { + case MSG_EXIT: + if (env.debug) + fprintf(stderr, "[%d]: worker exit.\n", + env.worker_id); + goto out; + case MSG_DO_TEST: { + int test_to_run; + struct prog_test_def *test; + struct msg msg_done; + + test_to_run = msg.do_test.test_num; + test = &prog_test_defs[test_to_run]; + + if (env.debug) + fprintf(stderr, "[%d]: #%d:%s running.\n", + env.worker_id, + test_to_run + 1, + test->test_name); + + stdio_hijack(); + + run_one_test(test_to_run); + + stdio_restore(); + + memset(&msg_done, 0, sizeof(msg_done)); + msg_done.type = MSG_TEST_DONE; + msg_done.test_done.test_num = test_to_run; + msg_done.test_done.error_cnt = test->error_cnt; + msg_done.test_done.skip_cnt = test->skip_cnt; + msg_done.test_done.sub_succ_cnt = test->sub_succ_cnt; + msg_done.test_done.have_log = false; + + if (env.verbosity > VERBOSE_NONE || test->force_log || test->error_cnt) { + if (env.log_cnt) + msg_done.test_done.have_log = true; + } + if (send_message(sock, &msg_done) < 0) { + perror("Fail to send message done"); + goto out; + } + + /* send logs */ + if (msg_done.test_done.have_log) { + char *src; + size_t slen; + + src = env.log_buf; + slen = env.log_cnt; + while (slen) { + struct msg msg_log; + char *dest; + size_t len; + + memset(&msg_log, 0, sizeof(msg_log)); + msg_log.type = MSG_TEST_LOG; + dest = msg_log.test_log.log_buf; + len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen; + memcpy(dest, src, len); + + src += len; + slen -= len; + if (!slen) + msg_log.test_log.is_last = true; + + assert(send_message(sock, &msg_log) >= 0); + } + } + if (env.log_buf) { + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + if (env.debug) + fprintf(stderr, "[%d]: #%d:%s done.\n", + env.worker_id, + test_to_run + 1, + test->test_name); + break; + } /* case MSG_DO_TEST */ + default: + if (env.debug) + fprintf(stderr, "[%d]: unknown message.\n", env.worker_id); + return -1; + } + } +out: + return 0; +} + int main(int argc, char **argv) { static const struct argp argp = { @@ -809,7 +1286,7 @@ int main(int argc, char **argv) struct sigaction sigact = { .sa_handler = crash_handler, .sa_flags = SA_RESETHAND, - }; + }; int err, i; sigaction(SIGSEGV, &sigact, NULL); @@ -837,21 +1314,77 @@ int main(int argc, char **argv) return -1; } - save_netns(); - stdio_hijack(); + env.stdout = stdout; + env.stderr = stderr; + env.has_testmod = true; if (!env.list_test_names && load_bpf_testmod()) { fprintf(env.stderr, "WARNING! 
Selftests relying on bpf_testmod.ko will be skipped.\n"); env.has_testmod = false; } + + /* initializing tests */ for (i = 0; i < prog_test_cnt; i++) { struct prog_test_def *test = &prog_test_defs[i]; - env.test = test; test->test_num = i + 1; - - if (!should_run(&env.test_selector, + if (should_run(&env.test_selector, test->test_num, test->test_name)) + test->should_run = true; + else + test->should_run = false; + } + + /* ignore workers if we are just listing */ + if (env.get_test_cnt || env.list_test_names) + env.workers = 0; + + /* launch workers if requested */ + env.worker_id = -1; /* main process */ + if (env.workers) { + env.worker_pids = calloc(sizeof(__pid_t), env.workers); + env.worker_socks = calloc(sizeof(int), env.workers); + if (env.debug) + fprintf(stdout, "Launching %d workers.\n", env.workers); + for (i = 0; i < env.workers; i++) { + int sv[2]; + pid_t pid; + + if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) { + perror("Fail to create worker socket"); + return -1; + } + pid = fork(); + if (pid < 0) { + perror("Failed to fork worker"); + return -1; + } else if (pid != 0) { /* main process */ + close(sv[1]); + env.worker_pids[i] = pid; + env.worker_socks[i] = sv[0]; + } else { /* inside each worker process */ + close(sv[0]); + env.worker_id = i; + return worker_main(sv[1]); + } + } + + if (env.worker_id == -1) { + server_main(); + goto out; + } + } + + /* The rest of the main process */ + + /* on single mode */ + save_netns(); + + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test = &prog_test_defs[i]; + struct test_result *result; + + if (!test->should_run) continue; if (env.get_test_cnt) { @@ -865,33 +1398,35 @@ int main(int argc, char **argv) continue; } - test->run_test(); - /* ensure last sub-test is finalized properly */ - if (test->subtest_name) - test__end_subtest(); + stdio_hijack(); - test->tested = true; + run_one_test(i); - dump_test_log(test, test->error_cnt); + stdio_restore(); fprintf(env.stdout, "#%d %s:%s\n", test->test_num, test->test_name, test->error_cnt ? "FAIL" : (test->skip_cnt ? 
"SKIP" : "OK")); + result = &test_results[i]; + result->error_cnt = test->error_cnt; + if (env.log_buf) { + result->log_buf = strdup(env.log_buf); + result->log_cnt = env.log_cnt; + + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + if (test->error_cnt) env.fail_cnt++; else env.succ_cnt++; - skip_account(); - reset_affinity(); - restore_netns(); - if (test->need_cgroup_cleanup) - cleanup_cgroup_environment(); + skip_account(); + env.sub_succ_cnt += test->sub_succ_cnt; } - if (!env.list_test_names && env.has_testmod) - unload_bpf_testmod(); - stdio_restore(); if (env.get_test_cnt) { printf("%d\n", env.succ_cnt); @@ -904,14 +1439,18 @@ int main(int argc, char **argv) fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); + print_all_error_logs(); + + close(env.saved_netns_fd); out: + if (!env.list_test_names && env.has_testmod) + unload_bpf_testmod(); free_str_set(&env.test_selector.blacklist); free_str_set(&env.test_selector.whitelist); free(env.test_selector.num_set); free_str_set(&env.subtest_selector.blacklist); free_str_set(&env.subtest_selector.whitelist); free(env.subtest_selector.num_set); - close(env.saved_netns_fd); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) return EXIT_NO_TEST; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 9b8a1810b700..93c1ff705533 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -62,6 +62,7 @@ struct test_env { struct test_selector test_selector; struct test_selector subtest_selector; bool verifier_stats; + bool debug; enum verbosity verbosity; bool jit_enabled; @@ -69,7 +70,8 @@ struct test_env { bool get_test_cnt; bool list_test_names; - struct prog_test_def *test; + struct prog_test_def *test; /* current running tests */ + FILE *stdout; FILE *stderr; char *log_buf; @@ -82,6 +84,38 @@ struct test_env { int skip_cnt; /* skipped tests */ int saved_netns_fd; + int workers; /* number of worker process */ + int worker_id; /* id number of current worker, main process is -1 */ + pid_t *worker_pids; /* array of worker pids */ + int *worker_socks; /* array of worker socks */ + int *worker_current_test; /* array of current running test for each worker */ +}; + +#define MAX_LOG_TRUNK_SIZE 8192 +enum msg_type { + MSG_DO_TEST = 0, + MSG_TEST_DONE = 1, + MSG_TEST_LOG = 2, + MSG_EXIT = 255, +}; +struct msg { + enum msg_type type; + union { + struct { + int test_num; + } do_test; + struct { + int test_num; + int sub_succ_cnt; + int error_cnt; + int skip_cnt; + bool have_log; + } test_done; + struct { + char log_buf[MAX_LOG_TRUNK_SIZE + 1]; + bool is_last; + } test_log; + }; }; extern struct test_env env; -- cgit v1.2.3 From 6587ff58cea4a7f252b8bbc4031a1bf4ec3781d8 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:07 -0700 Subject: selftests/bpf: Allow some tests to be executed in sequence This patch allows tests to define serial_test_name() instead of test_name(), and this will make test_progs execute those in sequence after all other tests finished executing concurrently. 
Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-3-fallentree@fb.com --- tools/testing/selftests/bpf/test_progs.c | 60 ++++++++++++++++++++++++++++---- 1 file changed, 54 insertions(+), 6 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 20dd1e2f8d06..1f4a48566991 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -50,6 +50,7 @@ struct prog_test_def { const char *test_name; int test_num; void (*run_test)(void); + void (*run_serial_test)(void); bool force_log; int error_cnt; int skip_cnt; @@ -457,14 +458,17 @@ static int load_bpf_testmod(void) } /* extern declarations for test funcs */ -#define DEFINE_TEST(name) extern void test_##name(void); +#define DEFINE_TEST(name) \ + extern void test_##name(void) __weak; \ + extern void serial_test_##name(void) __weak; #include #undef DEFINE_TEST static struct prog_test_def prog_test_defs[] = { -#define DEFINE_TEST(name) { \ - .test_name = #name, \ - .run_test = &test_##name, \ +#define DEFINE_TEST(name) { \ + .test_name = #name, \ + .run_test = &test_##name, \ + .run_serial_test = &serial_test_##name, \ }, #include #undef DEFINE_TEST @@ -907,7 +911,10 @@ static void run_one_test(int test_num) env.test = test; - test->run_test(); + if (test->run_test) + test->run_test(); + else if (test->run_serial_test) + test->run_serial_test(); /* ensure last sub-test is finalized properly */ if (test->subtest_name) @@ -957,7 +964,7 @@ static void *dispatch_thread(void *ctx) pthread_mutex_unlock(¤t_test_lock); } - if (!test->should_run) + if (!test->should_run || test->run_serial_test) continue; /* run test through worker */ @@ -1136,6 +1143,40 @@ static int server_main(void) free(env.worker_current_test); free(data); + /* run serial tests */ + save_netns(); + + for (int i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test = &prog_test_defs[i]; + struct test_result *result = &test_results[i]; + + if (!test->should_run || !test->run_serial_test) + continue; + + stdio_hijack(); + + run_one_test(i); + + stdio_restore(); + if (env.log_buf) { + result->log_cnt = env.log_cnt; + result->log_buf = strdup(env.log_buf); + + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + restore_netns(); + + fprintf(stdout, "#%d %s:%s\n", + test->test_num, test->test_name, + test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK")); + + result->error_cnt = test->error_cnt; + result->skip_cnt = test->skip_cnt; + result->sub_succ_cnt = test->sub_succ_cnt; + } + /* generate summary */ fflush(stderr); fflush(stdout); @@ -1333,6 +1374,13 @@ int main(int argc, char **argv) test->should_run = true; else test->should_run = false; + + if ((test->run_test == NULL && test->run_serial_test == NULL) || + (test->run_test != NULL && test->run_serial_test != NULL)) { + fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n", + test->test_num, test->test_name, test->test_name, test->test_name); + exit(EXIT_ERR_SETUP_INFRA); + } } /* ignore workers if we are just listing */ -- cgit v1.2.3 From e87c3434f81ae566693cfdc22370dc938b2989dd Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:09 -0700 Subject: selftests/bpf: Add per worker cgroup suffix This patch make each worker use a unique cgroup base directory, thus allowing tests that uses cgroups to run concurrently. 
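A rough usage sketch (hypothetical test, helper names as used elsewhere in the selftests): because the work directory is keyed by the calling process's pid, two workers running such a test at the same time operate on disjoint cgroup trees:

  #include <test_progs.h>

  void test_cgroup_example(void)
  {
  	/* lands under a cgroup-test-work-dir<pid> directory for this worker */
  	int cgroup_fd = test__join_cgroup("/example");

  	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
  		return;
  	/* ... attach and exercise cgroup-based programs here ... */
  	close(cgroup_fd);
  	/* test_progs removes the per-pid work directory after the test */
  }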
Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-5-fallentree@fb.com --- tools/testing/selftests/bpf/cgroup_helpers.c | 6 +++--- tools/testing/selftests/bpf/cgroup_helpers.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index f3daa44a8266..8fcd44841bb2 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "cgroup_helpers.h" @@ -33,10 +34,9 @@ #define CGROUP_MOUNT_DFLT "/sys/fs/cgroup" #define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls" #define CGROUP_WORK_DIR "/cgroup-test-work-dir" - #define format_cgroup_path(buf, path) \ - snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \ - CGROUP_WORK_DIR, path) + snprintf(buf, sizeof(buf), "%s%s%d%s", CGROUP_MOUNT_PATH, \ + CGROUP_WORK_DIR, getpid(), path) #define format_classid_path(buf) \ snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \ diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 629da3854b3e..fcc9cb91b211 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -26,4 +26,4 @@ int join_classid(void); int setup_classid_environment(void); void cleanup_classid_environment(void); -#endif /* __CGROUP_HELPERS_H */ +#endif /* __CGROUP_HELPERS_H */ \ No newline at end of file -- cgit v1.2.3 From d719de0d2f3cbdb5890a147b90a51c5eaaef103e Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:11 -0700 Subject: selftests/bpf: Fix race condition in enable_stats In parallel execution mode, this test now need to use atomic operation to avoid race condition. Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-7-fallentree@fb.com --- tools/testing/selftests/bpf/progs/test_enable_stats.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_enable_stats.c b/tools/testing/selftests/bpf/progs/test_enable_stats.c index 01a002ade529..1705097d01d7 100644 --- a/tools/testing/selftests/bpf/progs/test_enable_stats.c +++ b/tools/testing/selftests/bpf/progs/test_enable_stats.c @@ -13,6 +13,6 @@ __u64 count = 0; SEC("raw_tracepoint/sys_enter") int test_enable_stats(void *ctx) { - count += 1; + __sync_fetch_and_add(&count, 1); return 0; } -- cgit v1.2.3 From 445e72c782a1f770440e229afeb0c4e386da943c Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:12 -0700 Subject: selftests/bpf: Make cgroup_v1v2 use its own port This patch change cgroup_v1v2 use a different port, avoid conflict with other tests. 
Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-8-fallentree@fb.com --- tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c | 2 +- tools/testing/selftests/bpf/progs/connect4_dropper.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index ab3b9bc5e6d1..9026b42914d3 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -46,7 +46,7 @@ void test_cgroup_v1v2(void) { struct network_helper_opts opts = {}; int server_fd, client_fd, cgroup_fd; - static const int port = 60123; + static const int port = 60120; /* Step 1: Check base connectivity works without any BPF. */ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c index b565d997810a..d3f4c5e4fb69 100644 --- a/tools/testing/selftests/bpf/progs/connect4_dropper.c +++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c @@ -18,7 +18,7 @@ int connect_v4_dropper(struct bpf_sock_addr *ctx) { if (ctx->type != SOCK_STREAM) return VERDICT_PROCEED; - if (ctx->user_port == bpf_htons(60123)) + if (ctx->user_port == bpf_htons(60120)) return VERDICT_REJECT; return VERDICT_PROCEED; } -- cgit v1.2.3 From 0f4feacc9155776fd2c7f1c7bcb41001d94990a0 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:15 -0700 Subject: selftests/bpf: Adding pid filtering for atomics test This make atomics test able to run in parallel with other tests. Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-11-fallentree@fb.com --- tools/testing/selftests/bpf/prog_tests/atomics.c | 1 + tools/testing/selftests/bpf/progs/atomics.c | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index ba0e1efe5a45..1486be5d3209 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -225,6 +225,7 @@ void test_atomics(void) test__skip(); goto cleanup; } + skel->bss->pid = getpid(); if (test__start_subtest("add")) test_add(skel); diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c index c245345e41ca..16e57313204a 100644 --- a/tools/testing/selftests/bpf/progs/atomics.c +++ b/tools/testing/selftests/bpf/progs/atomics.c @@ -10,6 +10,8 @@ bool skip_tests __attribute((__section__(".data"))) = false; bool skip_tests = true; #endif +__u32 pid = 0; + __u64 add64_value = 1; __u64 add64_result = 0; __u32 add32_value = 1; @@ -21,6 +23,8 @@ __u64 add_noreturn_value = 1; SEC("fentry/bpf_fentry_test1") int BPF_PROG(add, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 add_stack_value = 1; @@ -45,6 +49,8 @@ __s64 sub_noreturn_value = 1; SEC("fentry/bpf_fentry_test1") int BPF_PROG(sub, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 sub_stack_value = 1; @@ -67,6 +73,8 @@ __u64 and_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(and, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef 
ENABLE_ATOMICS_TESTS and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32); @@ -86,6 +94,8 @@ __u64 or_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(or, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32); or32_result = __sync_fetch_and_or(&or32_value, 0x011); @@ -104,6 +114,8 @@ __u64 xor_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(xor, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32); xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011); @@ -123,6 +135,8 @@ __u32 cmpxchg32_result_succeed = 0; SEC("fentry/bpf_fentry_test1") int BPF_PROG(cmpxchg, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3); cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2); @@ -142,6 +156,8 @@ __u32 xchg32_result = 0; SEC("fentry/bpf_fentry_test1") int BPF_PROG(xchg, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 val64 = 2; __u32 val32 = 2; -- cgit v1.2.3 From 5db02dd7f09fdc32fa3866386784d383aca191d8 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:17 -0700 Subject: selftests/bpf: Fix pid check in fexit_sleep test bpf_get_current_pid_tgid() returns a u64 whose upper 32 bits are the same as the userspace getpid() return value. Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-13-fallentree@fb.com --- tools/testing/selftests/bpf/progs/fexit_sleep.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c index 03a672d76353..bca92c9bd29a 100644 --- a/tools/testing/selftests/bpf/progs/fexit_sleep.c +++ b/tools/testing/selftests/bpf/progs/fexit_sleep.c @@ -13,7 +13,7 @@ int fexit_cnt = 0; SEC("fentry/__x64_sys_nanosleep") int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs) { - if ((int)bpf_get_current_pid_tgid() != pid) + if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; fentry_cnt++; @@ -23,7 +23,7 @@ int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs) SEC("fexit/__x64_sys_nanosleep") int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret) { - if ((int)bpf_get_current_pid_tgid() != pid) + if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; fexit_cnt++; -- cgit v1.2.3 From d3f7b1664d3ebd69751327f45f5cd4adfb29f620 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Wed, 6 Oct 2021 11:56:19 -0700 Subject: selftests/bpf: Make some tests serial Change tests that often fail in parallel execution mode to serial. 
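The pid checks added in the atomics and fexit_sleep patches above both depend on the same layout of the bpf_get_current_pid_tgid() return value: the tgid (what userspace getpid() reports) sits in the upper 32 bits and the thread id in the lower 32 bits. A small userspace illustration of that layout, not taken from the tree:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Mock-up of the helper's packing for the current task: tgid in the
 * upper half, tid in the lower half.
 */
static uint64_t fake_current_pid_tgid(void)
{
	uint64_t tgid = (uint64_t)getpid();
	uint64_t tid = (uint64_t)syscall(SYS_gettid);

	return tgid << 32 | tid;
}

int main(void)
{
	uint64_t v = fake_current_pid_tgid();

	/* Compare the upper half against getpid(), as the fixed tests do;
	 * the truncated lower half is the tid, which only equals getpid()
	 * on the process's main thread.
	 */
	printf("tgid=%u tid=%u getpid()=%d\n",
	       (uint32_t)(v >> 32), (uint32_t)v, getpid());
	return 0;
}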
Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211006185619.364369-15-fallentree@fb.com --- tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c | 2 +- tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c | 2 +- tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_link.c | 2 +- tools/testing/selftests/bpf/prog_tests/check_mtu.c | 2 +- tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 3 ++- tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c | 2 +- tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c | 2 +- tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c | 2 +- tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 3 ++- tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c | 2 +- tools/testing/selftests/bpf/prog_tests/modify_return.c | 3 ++- tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c | 3 ++- tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 2 +- tools/testing/selftests/bpf/prog_tests/perf_link.c | 3 ++- tools/testing/selftests/bpf/prog_tests/probe_user.c | 3 ++- tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c | 3 ++- tools/testing/selftests/bpf/prog_tests/select_reuseport.c | 2 +- tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c | 3 ++- tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c | 2 +- tools/testing/selftests/bpf/prog_tests/snprintf_btf.c | 2 +- tools/testing/selftests/bpf/prog_tests/sock_fields.c | 2 +- tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 2 +- tools/testing/selftests/bpf/prog_tests/timer.c | 3 ++- tools/testing/selftests/bpf/prog_tests/timer_mim.c | 2 +- tools/testing/selftests/bpf/prog_tests/tp_attach_query.c | 2 +- tools/testing/selftests/bpf/prog_tests/trace_printk.c | 2 +- tools/testing/selftests/bpf/prog_tests/trace_vprintk.c | 2 +- tools/testing/selftests/bpf/prog_tests/trampoline_count.c | 3 ++- tools/testing/selftests/bpf/prog_tests/xdp_attach.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_bonding.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_info.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_link.c | 2 +- 38 files changed, 48 insertions(+), 38 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c index 85babb0487b3..b52ff8ce34db 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -179,7 +179,7 @@ done: free_fds(est_fds, nr_est); } -void test_bpf_iter_setsockopt(void) +void serial_test_bpf_iter_setsockopt(void) { struct bpf_iter_setsockopt *iter_skel = NULL; struct bpf_cubic *cubic_skel = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index 284d5921c345..eb8eeebe6935 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -3,7 +3,7 @@ #define nr_iters 2 -void test_bpf_obj_id(void) +void serial_test_bpf_obj_id(void) 
{ const __u64 array_magic_value = 0xfaceb00c; const __u32 array_key = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index 876be0ecb654..621c57222191 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -363,7 +363,7 @@ close_bpf_object: cg_storage_multi_shared__destroy(obj); } -void test_cg_storage_multi(void) +void serial_test_cg_storage_multi(void) { int parent_cgroup_fd = -1, child_cgroup_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 70e94e783070..5de485c7370f 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -21,7 +21,7 @@ static int prog_load(void) bpf_log_buf, BPF_LOG_BUF_SIZE); } -void test_cgroup_attach_autodetach(void) +void serial_test_cgroup_attach_autodetach(void) { __u32 duration = 0, prog_cnt = 4, attach_flags; int allow_prog[2] = {-1}; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index 20bb8831dda6..731bea84d8ed 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -74,7 +74,7 @@ static int prog_load_cnt(int verdict, int val) return ret; } -void test_cgroup_attach_multi(void) +void serial_test_cgroup_attach_multi(void) { __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 9e96f8d87fea..10d3c33821a7 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -23,7 +23,7 @@ static int prog_load(int verdict) bpf_log_buf, BPF_LOG_BUF_SIZE); } -void test_cgroup_attach_override(void) +void serial_test_cgroup_attach_override(void) { int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1; __u32 duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c index 9091524131d6..9e6e6aad347c 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -24,7 +24,7 @@ int ping_and_check(int exp_calls, int exp_alt_calls) return 0; } -void test_cgroup_link(void) +void serial_test_cgroup_link(void) { struct { const char *path; diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index 012068f33a0a..f73e6e36b74d 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -195,7 +195,7 @@ cleanup: test_check_mtu__destroy(skel); } -void test_check_mtu(void) +void serial_test_check_mtu(void) { __u32 mtu_lo; diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index 2839f4270a26..9cff14a23bb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -380,7 +380,8 @@ static void test_func_map_prog_compatibility(void) 
"./test_attach_probe.o"); } -void test_fexit_bpf2bpf(void) +/* NOTE: affect other tests, must run in serial mode */ +void serial_test_fexit_bpf2bpf(void) { if (test__start_subtest("target_no_callees")) test_target_no_callees(); diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 0e8a4d2f023d..6093728497c7 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -2,7 +2,7 @@ #include #include -void test_flow_dissector_load_bytes(void) +void serial_test_flow_dissector_load_bytes(void) { struct bpf_flow_keys flow_keys; __u32 duration = 0, retval, size; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c index 3931ede5c534..f0c6c226aba8 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -628,7 +628,7 @@ out_close: } } -void test_flow_dissector_reattach(void) +void serial_test_flow_dissector_reattach(void) { int err, new_net, saved_net; diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c index e4f92feb7b32..d6d70a359aeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -73,7 +73,7 @@ static void close_perf_events(void) free(pfd_array); } -void test_get_branch_snapshot(void) +void serial_test_get_branch_snapshot(void) { struct get_branch_snapshot *skel = NULL; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index ddfb6bf97152..032a322d51f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -48,7 +48,8 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) *(bool *)ctx = true; } -void test_kfree_skb(void) +/* TODO: fix kernel panic caused by this test in parallel mode */ +void serial_test_kfree_skb(void) { struct __sk_buff skb = {}; struct bpf_prog_test_run_attr tattr = { diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c index 59adb4715394..7589c03fd26b 100644 --- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c @@ -541,7 +541,7 @@ close_servers: } } -void test_migrate_reuseport(void) +void serial_test_migrate_reuseport(void) { struct test_migrate_reuseport *skel; int i; diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index 97fec70c600b..b772fe30ce9b 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -53,7 +53,8 @@ cleanup: modify_return__destroy(skel); } -void test_modify_return(void) +/* TODO: conflict with get_func_ip_test */ +void serial_test_modify_return(void) { run_test(0 /* input_retval */, 1 /* want_side_effect */, diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index 2535788e135f..24d493482ffc 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -78,7 +78,8 @@ static void test_ns_current_pid_tgid_new_ns(void) return; } -void test_ns_current_pid_tgid(void) +/* TODO: use a different tracepoint */ +void serial_test_ns_current_pid_tgid(void) { if (test__start_subtest("ns_current_pid_tgid_root_ns")) test_current_pid_tgid(NULL); diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 6490e9673002..6979aff4aab2 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -43,7 +43,7 @@ int trigger_on_cpu(int cpu) return 0; } -void test_perf_buffer(void) +void serial_test_perf_buffer(void) { int err, on_len, nr_on_cpus = 0, nr_cpus, i; struct perf_buffer_opts pb_opts = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index b1abd0c46607..ede07344f264 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -23,7 +23,8 @@ static void burn_cpu(void) ++j; } -void test_perf_link(void) +/* TODO: often fails in concurrent mode */ +void serial_test_perf_link(void) { struct test_perf_link *skel = NULL; struct perf_event_attr attr; diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c index 52fe157e2a90..abf890d066eb 100644 --- a/tools/testing/selftests/bpf/prog_tests/probe_user.c +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include -void test_probe_user(void) +/* TODO: corrupts other tests uses connect() */ +void serial_test_probe_user(void) { const char *prog_name = "handle_sys_connect"; const char *obj_file = "./test_probe_user.o"; diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c index 5c45424cac5f..ddefa1192e5d 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -3,7 +3,8 @@ #include #include -void test_raw_tp_writable_test_run(void) +/* NOTE: conflict with other tests. 
*/ +void serial_test_raw_tp_writable_test_run(void) { __u32 duration = 0; char error[4096]; diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index d40e9156c48d..3cfc910ab3c1 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -858,7 +858,7 @@ out: cleanup(); } -void test_select_reuseport(void) +void serial_test_select_reuseport(void) { saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); if (saved_tcp_fo < 0) diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c index 189a34a7addb..15dacfcfaa6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c @@ -25,7 +25,8 @@ static void *worker(void *p) return NULL; } -void test_send_signal_sched_switch(void) +/* NOTE: cause events loss */ +void serial_test_send_signal_sched_switch(void) { struct test_send_signal_kern *skel; pthread_t threads[THREAD_COUNT]; diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c index 2b392590e8ca..547ae53cde74 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c @@ -105,7 +105,7 @@ out: close(listen_fd); } -void test_sk_storage_tracing(void) +void serial_test_sk_storage_tracing(void) { struct test_sk_storage_trace_itself *skel_itself; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c index 76e1f5fe18fa..dd41b826be30 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c @@ -6,7 +6,7 @@ /* Demonstrate that bpf_snprintf_btf succeeds and that various data types * are formatted correctly. 
*/ -void test_snprintf_btf(void) +void serial_test_snprintf_btf(void) { struct netif_receive_skb *skel; struct netif_receive_skb__bss *bss; diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c index 577d619fb07e..fae40db4d81f 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c @@ -329,7 +329,7 @@ done: close(listen_fd); } -void test_sock_fields(void) +void serial_test_sock_fields(void) { struct bpf_link *egress_link = NULL, *ingress_link = NULL; int parent_cg_fd = -1, child_cg_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 5c5979046523..102c73a00402 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -2037,7 +2037,7 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, test_udp_unix_redir(skel, map, family); } -void test_sockmap_listen(void) +void serial_test_sockmap_listen(void) { struct test_sockmap_listen *skel; diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 25f40e1b9967..0f4e49e622cd 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -39,7 +39,8 @@ static int timer(struct timer *timer_skel) return 0; } -void test_timer(void) +/* TODO: use pid filtering */ +void serial_test_timer(void) { struct timer *timer_skel = NULL; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c index ced8f6cf347c..949a0617869d 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -52,7 +52,7 @@ static int timer_mim(struct timer_mim *timer_skel) return 0; } -void test_timer_mim(void) +void serial_test_timer_mim(void) { struct timer_mim_reject *timer_reject_skel = NULL; libbpf_print_fn_t old_print_fn = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index fb095e5cd9af..8652d0a46c87 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -void test_tp_attach_query(void) +void serial_test_tp_attach_query(void) { const int num_progs = 3; int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c index e47835f0a674..3f7a7141265e 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -8,7 +8,7 @@ #define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" #define SEARCHMSG "testing,testing" -void test_trace_printk(void) +void serial_test_trace_printk(void) { int err = 0, iter = 0, found = 0; struct trace_printk__bss *bss; diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c index 61a24e62e1a0..46101270cb1a 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c @@ -8,7 +8,7 @@ #define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" #define 
SEARCHMSG "1,2,3,4,5,6,7,8,9,10" -void test_trace_vprintk(void) +void serial_test_trace_vprintk(void) { int err = 0, iter = 0, found = 0; struct trace_vprintk__bss *bss; diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index d7f5a931d7f3..fc146671b20a 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -41,7 +41,8 @@ static struct bpf_link *load(struct bpf_object *obj, const char *name) return bpf_program__attach_trace(prog); } -void test_trampoline_count(void) +/* TODO: use different target function to run in concurrent mode */ +void serial_test_trampoline_count(void) { const char *fentry_name = "fentry/__set_task_comm"; const char *fexit_name = "fexit/__set_task_comm"; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index 15ef3531483e..4c4057262cd8 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -4,7 +4,7 @@ #define IFINDEX_LO 1 #define XDP_FLAGS_REPLACE (1U << 4) -void test_xdp_attach(void) +void serial_test_xdp_attach(void) { __u32 duration = 0, id1, id2, id0 = 0, len; struct bpf_object *obj1, *obj2, *obj3; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index ad3ba81b4048..faa22b84f2ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -519,7 +519,7 @@ static struct bond_test_case bond_test_cases[] = { { "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, }, }; -void test_xdp_bonding(void) +void serial_test_xdp_bonding(void) { libbpf_print_fn_t old_print_fn; struct skeletons skeletons = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index 8755effd80b0..fd812bd43600 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -7,7 +7,7 @@ #define IFINDEX_LO 1 -void test_xdp_cpumap_attach(void) +void serial_test_xdp_cpumap_attach(void) { struct test_xdp_with_cpumap_helpers *skel; struct bpf_prog_info info = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index c72af030ff10..d4e9a9972a67 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -72,7 +72,7 @@ void test_neg_xdp_devmap_helpers(void) } -void test_xdp_devmap_attach(void) +void serial_test_xdp_devmap_attach(void) { if (test__start_subtest("DEVMAP with programs in entries")) test_xdp_with_devmap_helpers(); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c index d2d7a283d72f..4e2a4fd56f67 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -4,7 +4,7 @@ #define IFINDEX_LO 1 -void test_xdp_info(void) +void serial_test_xdp_info(void) { __u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id; const char *file = "./xdp_dummy.o"; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c index 46eed0a33c23..983ab0b47d30 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/xdp_link.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -6,7 +6,7 @@ #define IFINDEX_LO 1 -void test_xdp_link(void) +void serial_test_xdp_link(void) { __u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err; DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1); -- cgit v1.2.3 From 5319255b8df9271474bc9027cabf82253934f28d Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 7 Oct 2021 19:33:28 +0200 Subject: selftests/bpf: Skip verifier tests that fail to load with ENOTSUPP The verifier tests added in commit c48e51c8b07a ("bpf: selftests: Add selftests for module kfunc support") fail on s390, since the JIT does not support calling kernel functions. This is most likely an issue for all the other non-Intel arches, as well as on Intel with !CONFIG_DEBUG_INFO_BTF or !CONFIG_BPF_JIT. Trying to check for messages from all the possible add_kfunc_call() failure cases in test_verifier looks pointless, so do a much simpler thing instead: just like it's already done in do_prog_test_run(), skip the tests that fail to load with ENOTSUPP. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211007173329.381754-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/test_verifier.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3a9e332c5e36..25afe423b3f0 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -47,6 +47,10 @@ #include "test_btf.h" #include "../../../include/linux/filter.h" +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + #define MAX_INSNS BPF_MAXINSNS #define MAX_TEST_INSNS 1000000 #define MAX_FIXUPS 8 @@ -974,7 +978,7 @@ static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val, if (err) { switch (saved_errno) { - case 524/*ENOTSUPP*/: + case ENOTSUPP: printf("Did not run the program (not supported) "); return 0; case EPERM: @@ -1119,6 +1123,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv, goto close_fds; } + if (fd_prog < 0 && saved_errno == ENOTSUPP) { + printf("SKIP (program uses an unsupported feature)\n"); + skips++; + goto close_fds; + } + alignment_prevented_execution = 0; if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) { -- cgit v1.2.3 From 223f903e9c832699f4e5f422281a60756c1c6cfe Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 12 Oct 2021 09:48:38 -0700 Subject: bpf: Rename BTF_KIND_TAG to BTF_KIND_DECL_TAG Patch set [1] introduced BTF_KIND_TAG to allow tagging declarations for struct/union, struct/union field, var, func and func arguments and these tags will be encoded into dwarf. They are also encoded to btf by llvm for the bpf target. After BTF_KIND_TAG is introduced, we intended to use it for kernel __user attributes. But kernel __user is actually a type attribute. Upstream and internal discussion showed it is not a good idea to mix declaration attribute and type attribute. So we proposed to introduce btf_type_tag as a type attribute and existing btf_tag renamed to btf_decl_tag ([2]). This patch renamed BTF_KIND_TAG to BTF_KIND_DECL_TAG and some other declarations with *_tag to *_decl_tag to make it clear the tag is for declaration. In the future, BTF_KIND_TYPE_TAG might be introduced per [3]. 
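As an illustration of the distinction being drawn here: a decl tag annotates a declaration (struct, union, member, variable, function or argument), which is what BTF_KIND_DECL_TAG encodes, while a type tag would annotate the type itself. The sketch below follows the attribute spellings from the Clang reviews referenced next; the macro name and tag strings are made up, and it needs a Clang new enough to know the attribute (Clang 14, per the README.rst update later in this patch):

#define __decl_tag(x) __attribute__((btf_decl_tag(x)))

struct map_value {
	int counter __decl_tag("percpu_hint");	/* tags this member's declaration */
} __decl_tag("kernel_only");			/* tags the struct declaration itself */

/* A type tag, by contrast, would mark the pointed-to type -- the
 * BTF_KIND_TYPE_TAG direction mentioned above, e.g. for kernel __user
 * pointers:
 *
 *	int __attribute__((btf_type_tag("user"))) *user_ptr;
 */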
[1] https://lore.kernel.org/bpf/20210914223004.244411-1-yhs@fb.com/ [2] https://reviews.llvm.org/D111588 [3] https://reviews.llvm.org/D111199 Fixes: b5ea834dde6b ("bpf: Support for new btf kind BTF_KIND_TAG") Fixes: 5b84bd10363e ("libbpf: Add support for BTF_KIND_TAG") Fixes: 5c07f2fec003 ("bpftool: Add support for BTF_KIND_TAG") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211012164838.3345699-1-yhs@fb.com --- Documentation/bpf/btf.rst | 24 ++-- include/uapi/linux/btf.h | 8 +- kernel/bpf/btf.c | 44 +++--- tools/bpf/bpftool/btf.c | 6 +- tools/include/uapi/linux/btf.h | 8 +- tools/lib/bpf/btf.c | 36 ++--- tools/lib/bpf/btf.h | 12 +- tools/lib/bpf/btf_dump.c | 6 +- tools/lib/bpf/libbpf.c | 24 ++-- tools/lib/bpf/libbpf.map | 2 +- tools/lib/bpf/libbpf_internal.h | 4 +- tools/testing/selftests/bpf/README.rst | 4 +- tools/testing/selftests/bpf/btf_helpers.c | 8 +- tools/testing/selftests/bpf/prog_tests/btf.c | 160 ++++++++++----------- tools/testing/selftests/bpf/prog_tests/btf_write.c | 30 ++-- tools/testing/selftests/bpf/progs/tag.c | 6 +- tools/testing/selftests/bpf/test_btf.h | 4 +- 17 files changed, 193 insertions(+), 193 deletions(-) (limited to 'tools/testing') diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 1bfe4072f5fc..9e5b4a98af76 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -85,7 +85,7 @@ sequentially and type id is assigned to each recognized type starting from id #define BTF_KIND_VAR 14 /* Variable */ #define BTF_KIND_DATASEC 15 /* Section */ #define BTF_KIND_FLOAT 16 /* Floating point */ - #define BTF_KIND_TAG 17 /* Tag */ + #define BTF_KIND_DECL_TAG 17 /* Decl Tag */ Note that the type section encodes debug info, not just pure types. ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram. @@ -107,7 +107,7 @@ Each type contains the following common data:: * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO and TAG. + * FUNC, FUNC_PROTO and DECL_TAG. * "type" is a type_id referring to another type. */ union { @@ -466,30 +466,30 @@ map definition. No additional type data follow ``btf_type``. -2.2.17 BTF_KIND_TAG -~~~~~~~~~~~~~~~~~~~ +2.2.17 BTF_KIND_DECL_TAG +~~~~~~~~~~~~~~~~~~~~~~~~ ``struct btf_type`` encoding requirement: * ``name_off``: offset to a non-empty string * ``info.kind_flag``: 0 - * ``info.kind``: BTF_KIND_TAG + * ``info.kind``: BTF_KIND_DECL_TAG * ``info.vlen``: 0 * ``type``: ``struct``, ``union``, ``func`` or ``var`` -``btf_type`` is followed by ``struct btf_tag``.:: +``btf_type`` is followed by ``struct btf_decl_tag``.:: - struct btf_tag { + struct btf_decl_tag { __u32 component_idx; }; -The ``name_off`` encodes btf_tag attribute string. +The ``name_off`` encodes btf_decl_tag attribute string. The ``type`` should be ``struct``, ``union``, ``func`` or ``var``. -For ``var`` type, ``btf_tag.component_idx`` must be ``-1``. -For the other three types, if the btf_tag attribute is +For ``var`` type, ``btf_decl_tag.component_idx`` must be ``-1``. +For the other three types, if the btf_decl_tag attribute is applied to the ``struct``, ``union`` or ``func`` itself, -``btf_tag.component_idx`` must be ``-1``. Otherwise, +``btf_decl_tag.component_idx`` must be ``-1``. 
Otherwise, the attribute is applied to a ``struct``/``union`` member or -a ``func`` argument, and ``btf_tag.component_idx`` should be a +a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a valid index (starting from 0) pointing to a member or an argument. 3. BTF Kernel API diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 642b6ecb37d7..deb12f755f0f 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO, VAR and TAG. + * FUNC, FUNC_PROTO, VAR and DECL_TAG. * "type" is a type_id referring to another type. */ union { @@ -74,7 +74,7 @@ enum { BTF_KIND_VAR = 14, /* Variable */ BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ - BTF_KIND_TAG = 17, /* Tag */ + BTF_KIND_DECL_TAG = 17, /* Decl Tag */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, @@ -174,14 +174,14 @@ struct btf_var_secinfo { __u32 size; }; -/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe +/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe * additional information related to the tag applied location. * If component_idx == -1, the tag is applied to a struct, union, * variable or function. Otherwise, it is applied to a struct/union * member or a func argument, and component_idx indicates which member * or argument (0 ... vlen-1). */ -struct btf_tag { +struct btf_decl_tag { __s32 component_idx; }; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2ebffb9f57eb..9059053088b9 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -281,7 +281,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", - [BTF_KIND_TAG] = "TAG", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; const char *btf_type_str(const struct btf_type *t) @@ -460,12 +460,12 @@ static bool btf_type_is_datasec(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; } -static bool btf_type_is_tag(const struct btf_type *t) +static bool btf_type_is_decl_tag(const struct btf_type *t) { - return BTF_INFO_KIND(t->info) == BTF_KIND_TAG; + return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG; } -static bool btf_type_is_tag_target(const struct btf_type *t) +static bool btf_type_is_decl_tag_target(const struct btf_type *t) { return btf_type_is_func(t) || btf_type_is_struct(t) || btf_type_is_var(t); @@ -549,7 +549,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, static bool btf_type_is_resolve_source_only(const struct btf_type *t) { return btf_type_is_var(t) || - btf_type_is_tag(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -576,7 +576,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t) btf_type_is_struct(t) || btf_type_is_array(t) || btf_type_is_var(t) || - btf_type_is_tag(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -630,9 +630,9 @@ static const struct btf_var *btf_type_var(const struct btf_type *t) return (const struct btf_var *)(t + 1); } -static const struct btf_tag *btf_type_tag(const struct btf_type *t) +static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t) { - return (const struct btf_tag *)(t + 1); + return (const struct btf_decl_tag *)(t + 1); } static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) @@ -3820,11 +3820,11 @@ static const struct 
btf_kind_operations float_ops = { .show = btf_df_show, }; -static s32 btf_tag_check_meta(struct btf_verifier_env *env, +static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, const struct btf_type *t, u32 meta_left) { - const struct btf_tag *tag; + const struct btf_decl_tag *tag; u32 meta_needed = sizeof(*tag); s32 component_idx; const char *value; @@ -3852,7 +3852,7 @@ static s32 btf_tag_check_meta(struct btf_verifier_env *env, return -EINVAL; } - component_idx = btf_type_tag(t)->component_idx; + component_idx = btf_type_decl_tag(t)->component_idx; if (component_idx < -1) { btf_verifier_log_type(env, t, "Invalid component_idx"); return -EINVAL; @@ -3863,7 +3863,7 @@ static s32 btf_tag_check_meta(struct btf_verifier_env *env, return meta_needed; } -static int btf_tag_resolve(struct btf_verifier_env *env, +static int btf_decl_tag_resolve(struct btf_verifier_env *env, const struct resolve_vertex *v) { const struct btf_type *next_type; @@ -3874,7 +3874,7 @@ static int btf_tag_resolve(struct btf_verifier_env *env, u32 vlen; next_type = btf_type_by_id(btf, next_type_id); - if (!next_type || !btf_type_is_tag_target(next_type)) { + if (!next_type || !btf_type_is_decl_tag_target(next_type)) { btf_verifier_log_type(env, v->t, "Invalid type_id"); return -EINVAL; } @@ -3883,7 +3883,7 @@ static int btf_tag_resolve(struct btf_verifier_env *env, !env_type_is_resolved(env, next_type_id)) return env_stack_push(env, next_type, next_type_id); - component_idx = btf_type_tag(t)->component_idx; + component_idx = btf_type_decl_tag(t)->component_idx; if (component_idx != -1) { if (btf_type_is_var(next_type)) { btf_verifier_log_type(env, v->t, "Invalid component_idx"); @@ -3909,18 +3909,18 @@ static int btf_tag_resolve(struct btf_verifier_env *env, return 0; } -static void btf_tag_log(struct btf_verifier_env *env, const struct btf_type *t) +static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) { btf_verifier_log(env, "type=%u component_idx=%d", t->type, - btf_type_tag(t)->component_idx); + btf_type_decl_tag(t)->component_idx); } -static const struct btf_kind_operations tag_ops = { - .check_meta = btf_tag_check_meta, - .resolve = btf_tag_resolve, +static const struct btf_kind_operations decl_tag_ops = { + .check_meta = btf_decl_tag_check_meta, + .resolve = btf_decl_tag_resolve, .check_member = btf_df_check_member, .check_kflag_member = btf_df_check_kflag_member, - .log_details = btf_tag_log, + .log_details = btf_decl_tag_log, .show = btf_df_show, }; @@ -4058,7 +4058,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { [BTF_KIND_VAR] = &var_ops, [BTF_KIND_DATASEC] = &datasec_ops, [BTF_KIND_FLOAT] = &float_ops, - [BTF_KIND_TAG] = &tag_ops, + [BTF_KIND_DECL_TAG] = &decl_tag_ops, }; static s32 btf_check_meta(struct btf_verifier_env *env, @@ -4143,7 +4143,7 @@ static bool btf_resolve_valid(struct btf_verifier_env *env, return !btf_resolved_type_id(btf, type_id) && !btf_resolved_type_size(btf, type_id); - if (btf_type_is_tag(t)) + if (btf_type_is_decl_tag(t)) return btf_resolved_type_id(btf, type_id) && !btf_resolved_type_size(btf, type_id); diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 49743ad96851..7b68d4f65fe6 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -37,7 +37,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", - [BTF_KIND_TAG] = "TAG", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; struct btf_attach_table { @@ 
-348,8 +348,8 @@ static int dump_btf_type(const struct btf *btf, __u32 id, printf(" size=%u", t->size); break; } - case BTF_KIND_TAG: { - const struct btf_tag *tag = (const void *)(t + 1); + case BTF_KIND_DECL_TAG: { + const struct btf_decl_tag *tag = (const void *)(t + 1); if (json_output) { jsonw_uint_field(w, "type_id", t->type); diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index 642b6ecb37d7..deb12f755f0f 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO, VAR and TAG. + * FUNC, FUNC_PROTO, VAR and DECL_TAG. * "type" is a type_id referring to another type. */ union { @@ -74,7 +74,7 @@ enum { BTF_KIND_VAR = 14, /* Variable */ BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ - BTF_KIND_TAG = 17, /* Tag */ + BTF_KIND_DECL_TAG = 17, /* Decl Tag */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, @@ -174,14 +174,14 @@ struct btf_var_secinfo { __u32 size; }; -/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe +/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe * additional information related to the tag applied location. * If component_idx == -1, the tag is applied to a struct, union, * variable or function. Otherwise, it is applied to a struct/union * member or a func argument, and component_idx indicates which member * or argument (0 ... vlen-1). */ -struct btf_tag { +struct btf_decl_tag { __s32 component_idx; }; diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 60fbd1c6d466..1f6dea11f600 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -309,8 +309,8 @@ static int btf_type_size(const struct btf_type *t) return base_size + sizeof(struct btf_var); case BTF_KIND_DATASEC: return base_size + vlen * sizeof(struct btf_var_secinfo); - case BTF_KIND_TAG: - return base_size + sizeof(struct btf_tag); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); return -EINVAL; @@ -383,8 +383,8 @@ static int btf_bswap_type_rest(struct btf_type *t) v->size = bswap_32(v->size); } return 0; - case BTF_KIND_TAG: - btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx); + case BTF_KIND_DECL_TAG: + btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx); return 0; default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); @@ -596,7 +596,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: type_id = t->type; break; case BTF_KIND_ARRAY: @@ -2569,7 +2569,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __ } /* - * Append new BTF_KIND_TAG type with: + * Append new BTF_KIND_DECL_TAG type with: * - *value* - non-empty/non-NULL string; * - *ref_type_id* - referenced type ID, it might not exist yet; * - *component_idx* - -1 for tagging reference type, otherwise struct/union @@ -2578,7 +2578,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __ * - >0, type ID of newly added BTF type; * - <0, on error. 
*/ -int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, +int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, int component_idx) { struct btf_type *t; @@ -2593,7 +2593,7 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); - sz = sizeof(struct btf_type) + sizeof(struct btf_tag); + sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag); t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM); @@ -2603,9 +2603,9 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, return value_off; t->name_off = value_off; - t->info = btf_type_info(BTF_KIND_TAG, 0, false); + t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false); t->type = ref_type_id; - btf_tag(t)->component_idx = component_idx; + btf_decl_tag(t)->component_idx = component_idx; return btf_commit_type(btf, sz); } @@ -3427,7 +3427,7 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) } /* Calculate type signature hash of INT or TAG. */ -static long btf_hash_int_tag(struct btf_type *t) +static long btf_hash_int_decl_tag(struct btf_type *t) { __u32 info = *(__u32 *)(t + 1); long h; @@ -3705,8 +3705,8 @@ static int btf_dedup_prep(struct btf_dedup *d) h = btf_hash_common(t); break; case BTF_KIND_INT: - case BTF_KIND_TAG: - h = btf_hash_int_tag(t); + case BTF_KIND_DECL_TAG: + h = btf_hash_int_decl_tag(t); break; case BTF_KIND_ENUM: h = btf_hash_enum(t); @@ -3761,11 +3761,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_FUNC_PROTO: case BTF_KIND_VAR: case BTF_KIND_DATASEC: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: return 0; case BTF_KIND_INT: - h = btf_hash_int_tag(t); + h = btf_hash_int_decl_tag(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); @@ -4382,13 +4382,13 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) } break; - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; - h = btf_hash_int_tag(t); + h = btf_hash_int_decl_tag(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); @@ -4671,7 +4671,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: return visit(&t->type, ctx); case BTF_KIND_ARRAY: { diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 864eb51753a1..4011e206e6f7 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -236,7 +236,7 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz); /* tag construction API */ -LIBBPF_API int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, +LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, int component_idx); struct btf_dedup_opts { @@ -426,9 +426,9 @@ static inline bool btf_is_float(const struct btf_type *t) return btf_kind(t) == BTF_KIND_FLOAT; } -static inline bool btf_is_tag(const struct btf_type *t) +static inline bool btf_is_decl_tag(const struct btf_type *t) { - return btf_kind(t) == BTF_KIND_TAG; + return btf_kind(t) == BTF_KIND_DECL_TAG; } static inline __u8 btf_int_encoding(const struct btf_type *t) @@ -499,10 +499,10 @@ btf_var_secinfos(const 
struct btf_type *t) return (struct btf_var_secinfo *)(t + 1); } -struct btf_tag; -static inline struct btf_tag *btf_tag(const struct btf_type *t) +struct btf_decl_tag; +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) { - return (struct btf_tag *)(t + 1); + return (struct btf_decl_tag *)(t + 1); } #ifdef __cplusplus diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index ad6df97295ae..5ef42f0abed1 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -316,7 +316,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: d->type_states[t->type].referenced = 1; break; @@ -584,7 +584,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DATASEC: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: d->type_states[id].order_state = ORDERED; return 0; @@ -2217,7 +2217,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d, case BTF_KIND_FWD: case BTF_KIND_FUNC: case BTF_KIND_FUNC_PROTO: - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: err = btf_dump_unsupported_data(d, t, id); break; case BTF_KIND_INT: diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ae0889bebe32..63d738654ff6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -195,8 +195,8 @@ enum kern_feature_id { FEAT_BTF_FLOAT, /* BPF perf link support */ FEAT_PERF_LINK, - /* BTF_KIND_TAG support */ - FEAT_BTF_TAG, + /* BTF_KIND_DECL_TAG support */ + FEAT_BTF_DECL_TAG, __FEAT_CNT, }; @@ -2024,7 +2024,7 @@ static const char *__btf_kind_str(__u16 kind) case BTF_KIND_VAR: return "var"; case BTF_KIND_DATASEC: return "datasec"; case BTF_KIND_FLOAT: return "float"; - case BTF_KIND_TAG: return "tag"; + case BTF_KIND_DECL_TAG: return "decl_tag"; default: return "unknown"; } } @@ -2524,9 +2524,9 @@ static bool btf_needs_sanitization(struct bpf_object *obj) bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); - bool has_tag = kernel_supports(obj, FEAT_BTF_TAG); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); - return !has_func || !has_datasec || !has_func_global || !has_float || !has_tag; + return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag; } static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) @@ -2535,15 +2535,15 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); - bool has_tag = kernel_supports(obj, FEAT_BTF_TAG); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); struct btf_type *t; int i, j, vlen; for (i = 1; i <= btf__get_nr_types(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); - if ((!has_datasec && btf_is_var(t)) || (!has_tag && btf_is_tag(t))) { - /* replace VAR/TAG with INT */ + if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { + /* replace VAR/DECL_TAG with INT */ t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); /* * using size = 1 is the safest choice, 4 will be too @@ -4248,7 +4248,7 @@ static int probe_kern_btf_float(void) strs, sizeof(strs))); } -static int probe_kern_btf_tag(void) +static int probe_kern_btf_decl_tag(void) { static 
const char strs[] = "\0tag"; __u32 types[] = { @@ -4258,7 +4258,7 @@ static int probe_kern_btf_tag(void) BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), BTF_VAR_STATIC, /* attr */ - BTF_TYPE_TAG_ENC(1, 2, -1), + BTF_TYPE_DECL_TAG_ENC(1, 2, -1), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), @@ -4481,8 +4481,8 @@ static struct kern_feature_desc { [FEAT_PERF_LINK] = { "BPF perf link support", probe_perf_link, }, - [FEAT_BTF_TAG] = { - "BTF_KIND_TAG support", probe_kern_btf_tag, + [FEAT_BTF_DECL_TAG] = { + "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, }, }; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index f270d25e4af3..e6fb1ba49369 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -394,5 +394,5 @@ LIBBPF_0.6.0 { bpf_object__prev_map; bpf_object__prev_program; btf__add_btf; - btf__add_tag; + btf__add_decl_tag; } LIBBPF_0.5.0; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index f7fd3944d46d..f6a5748dd318 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -69,8 +69,8 @@ #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) -#define BTF_TYPE_TAG_ENC(value, type, component_idx) \ - BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx) +#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index 554553acc6d9..5e287e445f75 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -204,7 +204,7 @@ __ https://reviews.llvm.org/D93563 btf_tag test and Clang version ============================== -The btf_tag selftest require LLVM support to recognize the btf_tag attribute. +The btf_tag selftest require LLVM support to recognize the btf_decl_tag attribute. It was introduced in `Clang 14`__. 
Without it, the btf_tag selftest will be skipped and you will observe: @@ -213,7 +213,7 @@ Without it, the btf_tag selftest will be skipped and you will observe: # btf_tag:SKIP -__ https://reviews.llvm.org/D106614 +__ https://reviews.llvm.org/D111588 Clang dependencies for static linking tests =========================================== diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index ce103fb0ad1b..668cfa20bb1b 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -24,12 +24,12 @@ static const char * const btf_kind_str_mapping[] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", - [BTF_KIND_TAG] = "TAG", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; static const char *btf_kind_str(__u16 kind) { - if (kind > BTF_KIND_TAG) + if (kind > BTF_KIND_DECL_TAG) return "UNKNOWN"; return btf_kind_str_mapping[kind]; } @@ -178,9 +178,9 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id) case BTF_KIND_FLOAT: fprintf(out, " size=%u", t->size); break; - case BTF_KIND_TAG: + case BTF_KIND_DECL_TAG: fprintf(out, " type_id=%u component_idx=%d", - t->type, btf_tag(t)->component_idx); + t->type, btf_decl_tag(t)->component_idx); break; default: break; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index acd33d0cd5d9..fa67f25bbef5 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3662,15 +3662,15 @@ static struct btf_raw_test raw_tests[] = { }, { - .descr = "tag test #1, struct/member, well-formed", + .descr = "decl_tag test #1, struct/member, well-formed", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_STRUCT_ENC(0, 2, 8), /* [2] */ BTF_MEMBER_ENC(NAME_TBD, 1, 0), BTF_MEMBER_ENC(NAME_TBD, 1, 32), - BTF_TAG_ENC(NAME_TBD, 2, -1), - BTF_TAG_ENC(NAME_TBD, 2, 0), - BTF_TAG_ENC(NAME_TBD, 2, 1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), BTF_END_RAW, }, BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"), @@ -3683,15 +3683,15 @@ static struct btf_raw_test raw_tests[] = { .max_entries = 1, }, { - .descr = "tag test #2, union/member, well-formed", + .descr = "decl_tag test #2, union/member, well-formed", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */ BTF_MEMBER_ENC(NAME_TBD, 1, 0), BTF_MEMBER_ENC(NAME_TBD, 1, 0), - BTF_TAG_ENC(NAME_TBD, 2, -1), - BTF_TAG_ENC(NAME_TBD, 2, 0), - BTF_TAG_ENC(NAME_TBD, 2, 1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), BTF_END_RAW, }, BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), @@ -3704,13 +3704,13 @@ static struct btf_raw_test raw_tests[] = { .max_entries = 1, }, { - .descr = "tag test #3, variable, well-formed", + .descr = "decl_tag test #3, variable, well-formed", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */ - BTF_TAG_ENC(NAME_TBD, 2, -1), - BTF_TAG_ENC(NAME_TBD, 3, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), BTF_END_RAW, }, BTF_STR_SEC("\0local\0global\0tag1\0tag2"), @@ -3723,16 +3723,16 @@ static struct btf_raw_test raw_tests[] = { .max_entries = 1, }, { - .descr = "tag test #4, func/parameter, well-formed", + .descr = "decl_tag test #4, func/parameter, 
well-formed", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ - BTF_TAG_ENC(NAME_TBD, 3, -1), - BTF_TAG_ENC(NAME_TBD, 3, 0), - BTF_TAG_ENC(NAME_TBD, 3, 1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 1), BTF_END_RAW, }, BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"), @@ -3745,11 +3745,11 @@ static struct btf_raw_test raw_tests[] = { .max_entries = 1, }, { - .descr = "tag test #5, invalid value", + .descr = "decl_tag test #5, invalid value", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ - BTF_TAG_ENC(0, 2, -1), + BTF_DECL_TAG_ENC(0, 2, -1), BTF_END_RAW, }, BTF_STR_SEC("\0local\0tag"), @@ -3764,10 +3764,10 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid value", }, { - .descr = "tag test #6, invalid target type", + .descr = "decl_tag test #6, invalid target type", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ - BTF_TAG_ENC(NAME_TBD, 1, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 1, -1), BTF_END_RAW, }, BTF_STR_SEC("\0tag1"), @@ -3782,11 +3782,11 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid type", }, { - .descr = "tag test #7, invalid vlen", + .descr = "decl_tag test #7, invalid vlen", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 0, 1), 2), (0), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 1), 2), (0), BTF_END_RAW, }, BTF_STR_SEC("\0local\0tag1"), @@ -3801,11 +3801,11 @@ static struct btf_raw_test raw_tests[] = { .err_str = "vlen != 0", }, { - .descr = "tag test #8, invalid kflag", + .descr = "decl_tag test #8, invalid kflag", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 1, 0), 2), (-1), + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), 2), (-1), BTF_END_RAW, }, BTF_STR_SEC("\0local\0tag1"), @@ -3820,11 +3820,11 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid btf_info kind_flag", }, { - .descr = "tag test #9, var, invalid component_idx", + .descr = "decl_tag test #9, var, invalid component_idx", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ - BTF_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), BTF_END_RAW, }, BTF_STR_SEC("\0local\0tag"), @@ -3839,13 +3839,13 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid component_idx", }, { - .descr = "tag test #10, struct member, invalid component_idx", + .descr = "decl_tag test #10, struct member, invalid component_idx", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_STRUCT_ENC(0, 2, 8), /* [2] */ BTF_MEMBER_ENC(NAME_TBD, 1, 0), BTF_MEMBER_ENC(NAME_TBD, 1, 32), - BTF_TAG_ENC(NAME_TBD, 2, 2), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 2), BTF_END_RAW, }, BTF_STR_SEC("\0m1\0m2\0tag"), @@ -3860,14 +3860,14 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid component_idx", }, { - .descr = "tag test #11, func parameter, invalid component_idx", + .descr = "decl_tag test #11, func parameter, invalid component_idx", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_FUNC_PROTO_ENC(0, 2), 
/* [2] */ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ - BTF_TAG_ENC(NAME_TBD, 3, 2), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 2), BTF_END_RAW, }, BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), @@ -3882,14 +3882,14 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid component_idx", }, { - .descr = "tag test #12, < -1 component_idx", + .descr = "decl_tag test #12, < -1 component_idx", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ - BTF_TAG_ENC(NAME_TBD, 3, -2), + BTF_DECL_TAG_ENC(NAME_TBD, 3, -2), BTF_END_RAW, }, BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), @@ -6672,9 +6672,9 @@ const struct btf_dedup_test dedup_tests[] = { /* const -> [1] int */ BTF_CONST_ENC(1), /* [6] */ /* tag -> [3] struct s */ - BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ /* tag -> [3] struct s, member 1 */ - BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ /* full copy of the above */ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */ @@ -6689,8 +6689,8 @@ const struct btf_dedup_test dedup_tests[] = { BTF_PTR_ENC(14), /* [13] */ BTF_CONST_ENC(9), /* [14] */ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */ - BTF_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */ - BTF_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */ BTF_END_RAW, }, BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"), @@ -6714,8 +6714,8 @@ const struct btf_dedup_test dedup_tests[] = { BTF_PTR_ENC(6), /* [5] */ /* const -> [1] int */ BTF_CONST_ENC(1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ - BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */ BTF_END_RAW, }, @@ -6841,8 +6841,8 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ - BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ - BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ BTF_END_RAW, }, BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), @@ -6869,8 +6869,8 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ - BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ - BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ BTF_END_RAW, }, BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), @@ -7036,14 +7036,14 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ /* tag -> t */ - BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ - BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */ /* tag -> func */ - BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */ - BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] 
*/ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */ /* tag -> func arg a1 */ - BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */ - BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */ BTF_END_RAW, }, BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), @@ -7056,9 +7056,9 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ - BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ - BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */ BTF_END_RAW, }, BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), @@ -7084,17 +7084,17 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */ /* tag -> f: tag1, tag2 */ - BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */ /* tag -> f/a2: tag1, tag2 */ - BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */ - BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */ /* tag -> f: tag1, tag3 */ - BTF_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */ - BTF_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */ /* tag -> f/a2: tag1, tag3 */ - BTF_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */ - BTF_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */ BTF_END_RAW, }, BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), @@ -7106,12 +7106,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ - BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */ - BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */ - BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */ - BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */ - BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */ BTF_END_RAW, }, BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), @@ -7133,17 +7133,17 @@ const struct btf_dedup_test dedup_tests[] = { BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), /* tag -> t: tag1, tag2 */ - BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */ - BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ /* tag -> t/m2: tag1, tag2 */ - BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ /* tag -> t: tag1, tag3 */ - BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */ - BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */ /* tag -> t/m2: 
tag1, tag3 */ - BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */ - BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */ BTF_END_RAW, }, BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), @@ -7154,12 +7154,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), - BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */ - BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */ - BTF_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */ - BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ - BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ - BTF_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */ BTF_END_RAW, }, BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), @@ -7202,8 +7202,8 @@ static int btf_type_size(const struct btf_type *t) return base_size + sizeof(struct btf_var); case BTF_KIND_DATASEC: return base_size + vlen * sizeof(struct btf_var_secinfo); - case BTF_KIND_TAG: - return base_size + sizeof(struct btf_tag); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); default: fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind); return -EINVAL; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c index 886e0fc1efb1..b912eeb0b6b4 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_write.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -277,26 +277,26 @@ static void gen_btf(struct btf *btf) "[17] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=1 offset=4 size=8", "raw_dump"); - /* TAG */ - id = btf__add_tag(btf, "tag1", 16, -1); + /* DECL_TAG */ + id = btf__add_decl_tag(btf, "tag1", 16, -1); ASSERT_EQ(id, 18, "tag_id"); t = btf__type_by_id(btf, 18); ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); - ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); ASSERT_EQ(t->type, 16, "tag_type"); - ASSERT_EQ(btf_tag(t)->component_idx, -1, "tag_component_idx"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, -1, "tag_component_idx"); ASSERT_STREQ(btf_type_raw_dump(btf, 18), - "[18] TAG 'tag1' type_id=16 component_idx=-1", "raw_dump"); + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "raw_dump"); - id = btf__add_tag(btf, "tag2", 14, 1); + id = btf__add_decl_tag(btf, "tag2", 14, 1); ASSERT_EQ(id, 19, "tag_id"); t = btf__type_by_id(btf, 19); ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value"); - ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); ASSERT_EQ(t->type, 14, "tag_type"); - ASSERT_EQ(btf_tag(t)->component_idx, 1, "tag_component_idx"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx"); ASSERT_STREQ(btf_type_raw_dump(btf, 19), - "[19] TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); } static void test_btf_add() @@ -336,8 +336,8 @@ static void test_btf_add() "[16] VAR 'var1' type_id=1, linkage=global-alloc", "[17] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=1 offset=4 size=8", - "[18] TAG 'tag1' type_id=16 component_idx=-1", - "[19] TAG 'tag2' 
type_id=14 component_idx=1"); + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1"); btf__free(btf); } @@ -389,8 +389,8 @@ static void test_btf_add_btf() "[16] VAR 'var1' type_id=1, linkage=global-alloc", "[17] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=1 offset=4 size=8", - "[18] TAG 'tag1' type_id=16 component_idx=-1", - "[19] TAG 'tag2' type_id=14 component_idx=1", + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", /* types appended from the second BTF */ "[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", @@ -418,8 +418,8 @@ static void test_btf_add_btf() "[35] VAR 'var1' type_id=20, linkage=global-alloc", "[36] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=20 offset=4 size=8", - "[37] TAG 'tag1' type_id=35 component_idx=-1", - "[38] TAG 'tag2' type_id=33 component_idx=1"); + "[37] DECL_TAG 'tag1' type_id=35 component_idx=-1", + "[38] DECL_TAG 'tag2' type_id=33 component_idx=1"); cleanup: btf__free(btf1); diff --git a/tools/testing/selftests/bpf/progs/tag.c b/tools/testing/selftests/bpf/progs/tag.c index b46b1bfac7da..672d19e7b120 100644 --- a/tools/testing/selftests/bpf/progs/tag.c +++ b/tools/testing/selftests/bpf/progs/tag.c @@ -8,9 +8,9 @@ #define __has_attribute(x) 0 #endif -#if __has_attribute(btf_tag) -#define __tag1 __attribute__((btf_tag("tag1"))) -#define __tag2 __attribute__((btf_tag("tag2"))) +#if __has_attribute(btf_decl_tag) +#define __tag1 __attribute__((btf_decl_tag("tag1"))) +#define __tag2 __attribute__((btf_decl_tag("tag2"))) volatile const bool skip_tests __tag1 __tag2 = false; #else #define __tag1 diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h index 0619e06d745e..32c7a57867da 100644 --- a/tools/testing/selftests/bpf/test_btf.h +++ b/tools/testing/selftests/bpf/test_btf.h @@ -69,7 +69,7 @@ #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) -#define BTF_TAG_ENC(value, type, component_idx) \ - BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx) +#define BTF_DECL_TAG_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) #endif /* _TEST_BTF_H */ -- cgit v1.2.3 From b8f49dce799f0c177ee6a56b3fd67e7fdc6e68c2 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Tue, 12 Oct 2021 10:32:30 +0800 Subject: selftests/bpf: Remove duplicated include in cgroup_helpers Fix following checkincludes.pl warning: ./scripts/checkincludes.pl tools/testing/selftests/bpf/cgroup_helpers.c tools/testing/selftests/bpf/cgroup_helpers.c: unistd.h is included more than once. 
Signed-off-by: Wan Jiabing Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211012023231.19911-1-wanjiabing@vivo.com --- tools/testing/selftests/bpf/cgroup_helpers.c | 1 - 1 file changed, 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 8fcd44841bb2..9d59c3990ca8 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -11,7 +11,6 @@ #include #include #include -#include #include "cgroup_helpers.h" -- cgit v1.2.3 From b16d12f3900283e00aded9131ba1e9b2880513c3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 13 Oct 2021 18:08:59 +0200 Subject: selftests/bpf: Use cpu_number only on arches that have it cpu_number exists only on Intel and aarch64, so skip the test involing it on other arches. An alternative would be to replace it with an exported non-ifdefed primitive-typed percpu variable from the common code, but there appears to be none. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211013160902.428340-2-iii@linux.ibm.com --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 87f9df653e4e..12f457b6786d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -778,8 +778,10 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, char *str) { +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT, "int cpu_number = (int)100", 100); +#endif TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT, "static int cpu_profile_flip = (int)2", 2); } -- cgit v1.2.3 From 7960d02dddccc0676d4a1b58e9964718b47388e1 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 15 Oct 2021 09:33:18 +0000 Subject: selftests/bpf: Some more atomic tests Some new verifier tests that hit some important gaps in the parameter space for atomic ops. There are already exhaustive tests for the JIT part in lib/test_bpf.c, but these exercise the verifier too. 
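For reference, each generated test case checks the same thing one would express in C roughly as below (illustrative sketch only; with a new enough clang and -mcpu=v3 the __sync_* builtins lower to the same BPF_ATOMIC | BPF_FETCH encodings that the macros below emit by hand via BPF_ATOMIC_OP()):

  /* what one __ATOMIC_FETCH_OP_TEST(..., BPF_ADD | BPF_FETCH, ...) case
   * verifies, expressed at the C level
   */
  __u64 val = 1;                             /* BPF_ST_MEM(BPF_DW, r10, -8, 1) */
  __u64 old = __sync_fetch_and_add(&val, 2); /* BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, ...) */
  if (old != 1)
          return 1;                          /* the "old != operand1 -> exit(1)" path */
  if (val != 3)
          return 2;                          /* the "val != result -> exit(2)" path */
  return 0;
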
Signed-off-by: Brendan Jackman Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211015093318.1273686-1-jackmanb@google.com --- .../selftests/bpf/verifier/atomic_cmpxchg.c | 38 +++++++++++++++ .../testing/selftests/bpf/verifier/atomic_fetch.c | 57 ++++++++++++++++++++++ .../selftests/bpf/verifier/atomic_invalid.c | 25 ++++++++++ 3 files changed, 120 insertions(+) create mode 100644 tools/testing/selftests/bpf/verifier/atomic_fetch.c create mode 100644 tools/testing/selftests/bpf/verifier/atomic_invalid.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c index 6e52dfc64415..c22dc83a41fd 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c +++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c @@ -119,3 +119,41 @@ }, .result = ACCEPT, }, +{ + "Dest pointer in r0 - fail", + .insns = { + /* val = 0; */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, 1); */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8), + /* if (r0 != 0) exit(1); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "Dest pointer in r0 - succeed", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r0 = atomic_cmpxchg(&val, r0, 0); */ + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c new file mode 100644 index 000000000000..3bc9ff7a860b --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c @@ -0,0 +1,57 @@ +#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \ + { \ + "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \ + .insns = { \ + /* u64 val = operan1; */ \ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, operand1), \ + /* u64 old = atomic_fetch_add(&val, operand2); */ \ + BPF_MOV64_REG(dst_reg, BPF_REG_10), \ + BPF_MOV64_IMM(src_reg, operand2), \ + BPF_ATOMIC_OP(BPF_DW, op, \ + dst_reg, src_reg, -8), \ + /* if (old != operand1) exit(1); */ \ + BPF_JMP_IMM(BPF_JEQ, src_reg, operand1, 2), \ + BPF_MOV64_IMM(BPF_REG_0, 1), \ + BPF_EXIT_INSN(), \ + /* if (val != result) exit (2); */ \ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), \ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, expect, 2), \ + BPF_MOV64_IMM(BPF_REG_0, 2), \ + BPF_EXIT_INSN(), \ + /* exit(0); */ \ + BPF_MOV64_IMM(BPF_REG_0, 0), \ + BPF_EXIT_INSN(), \ + }, \ + .result = ACCEPT, \ + } +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_AND | BPF_FETCH, 
0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XCHG, 0x011, 0x011), +#undef __ATOMIC_FETCH_OP_TEST diff --git a/tools/testing/selftests/bpf/verifier/atomic_invalid.c b/tools/testing/selftests/bpf/verifier/atomic_invalid.c new file mode 100644 index 000000000000..39272720b2f6 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/atomic_invalid.c @@ -0,0 +1,25 @@ +#define __INVALID_ATOMIC_ACCESS_TEST(op) \ + { \ + "atomic " #op " access through non-pointer ", \ + .insns = { \ + BPF_MOV64_IMM(BPF_REG_0, 1), \ + BPF_MOV64_IMM(BPF_REG_1, 0), \ + BPF_ATOMIC_OP(BPF_DW, op, BPF_REG_1, BPF_REG_0, -8), \ + BPF_MOV64_IMM(BPF_REG_0, 0), \ + BPF_EXIT_INSN(), \ + }, \ + .result = REJECT, \ + .errstr = "R1 invalid mem access 'inv'" \ + } +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_AND), +__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_OR), +__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG), +__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG), -- cgit v1.2.3 From b6c4e71516099be676ebd897ea440ce2dddca6d1 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Thu, 21 Oct 2021 21:47:52 +0800 Subject: selftests/bpf: Test bpf_skc_to_unix_sock() helper Add a new test which triggers unix_listen kernel function to test bpf_skc_to_unix_sock helper. 
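The user-space side of the trigger boils down to binding and listening on an abstract AF_UNIX socket (condensed from the test added below, error handling trimmed):

  #include <string.h>
  #include <unistd.h>
  #include <sys/socket.h>
  #include <sys/un.h>

  static int trigger_unix_listen(void)
  {
          struct sockaddr_un addr = { .sun_family = AF_UNIX };
          const char *path = "@skc_to_unix_sock";
          int fd = socket(AF_UNIX, SOCK_STREAM, 0);

          if (fd < 0)
                  return -1;
          /* abstract socket: the leading '@' becomes a leading '\0' */
          strncpy(addr.sun_path, path, strlen(path));
          addr.sun_path[0] = '\0';
          if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) || listen(fd, 1)) {
                  close(fd);
                  return -1;      /* listen() is what enters unix_listen() */
          }
          return fd;
  }
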
Signed-off-by: Hengqi Chen Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211021134752.1223426-3-hengqi.chen@gmail.com --- .../selftests/bpf/prog_tests/skc_to_unix_sock.c | 54 ++++++++++++++++++++++ .../selftests/bpf/progs/test_skc_to_unix_sock.c | 40 ++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c create mode 100644 tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c new file mode 100644 index 000000000000..3eefdfed1db9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include +#include +#include "test_skc_to_unix_sock.skel.h" + +static const char *sock_path = "@skc_to_unix_sock"; + +void test_skc_to_unix_sock(void) +{ + struct test_skc_to_unix_sock *skel; + struct sockaddr_un sockaddr; + int err, sockfd = 0; + + skel = test_skc_to_unix_sock__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_skc_to_unix_sock__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + err = test_skc_to_unix_sock__attach(skel); + if (!ASSERT_OK(err, "could not attach BPF object")) + goto cleanup; + + /* trigger unix_listen */ + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!ASSERT_GT(sockfd, 0, "socket failed")) + goto cleanup; + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_UNIX; + strncpy(sockaddr.sun_path, sock_path, strlen(sock_path)); + sockaddr.sun_path[0] = '\0'; + + err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); + if (!ASSERT_OK(err, "bind failed")) + goto cleanup; + + err = listen(sockfd, 1); + if (!ASSERT_OK(err, "listen failed")) + goto cleanup; + + ASSERT_EQ(strcmp(skel->bss->path, sock_path), 0, "bpf_skc_to_unix_sock failed"); + +cleanup: + if (sockfd) + close(sockfd); + test_skc_to_unix_sock__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c new file mode 100644 index 000000000000..a408ec95cba4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include "vmlinux.h" +#include +#include +#include "bpf_tracing_net.h" + +const volatile pid_t my_pid = 0; +char path[256] = {}; + +SEC("fentry/unix_listen") +int BPF_PROG(unix_listen, struct socket *sock, int backlog) +{ + pid_t pid = bpf_get_current_pid_tgid() >> 32; + struct unix_sock *unix_sk; + int i, len; + + if (pid != my_pid) + return 0; + + unix_sk = (struct unix_sock *)bpf_skc_to_unix_sock(sock->sk); + if (!unix_sk) + return 0; + + if (!UNIX_ABSTRACT(unix_sk)) + return 0; + + len = unix_sk->addr->len - sizeof(short); + path[0] = '@'; + for (i = 1; i < len; i++) { + if (i >= sizeof(struct sockaddr_un)) + break; + + path[i] = unix_sk->addr->name->sun_path[i]; + } + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From e1b9023fc7ab0dcfeefe9f0a14772ad7a1c37b5b Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Wed, 20 Oct 2021 00:48:18 -0700 Subject: selftests/bpf: Add verif_stats test verified_insns field was added to response of bpf_obj_get_info_by_fd call on a prog. 
Confirm that it's being populated by loading a simple program and asking for its info. Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211020074818.1017682-3-davemarchevsky@fb.com --- .../testing/selftests/bpf/prog_tests/verif_stats.c | 28 ++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/verif_stats.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c new file mode 100644 index 000000000000..b4bae1340cf1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include + +#include "trace_vprintk.lskel.h" + +void test_verif_stats(void) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct bpf_prog_info info = {}; + struct trace_vprintk *skel; + int err; + + skel = trace_vprintk__open_and_load(); + if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) + goto cleanup; + + err = bpf_obj_get_info_by_fd(skel->progs.sys_enter.prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto cleanup; + + if (!ASSERT_GT(info.verified_insns, 0, "verified_insns")) + goto cleanup; + +cleanup: + trace_vprintk__destroy(skel); +} -- cgit v1.2.3 From d4121376ac7a9c81a696d7558789b2f29ef3574e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 21 Oct 2021 13:41:30 +0200 Subject: selftests/bpf: Fix perf_buffer test on system with offline cpus The perf_buffer fails on system with offline cpus: # test_progs -t perf_buffer test_perf_buffer:PASS:nr_cpus 0 nsec test_perf_buffer:PASS:nr_on_cpus 0 nsec test_perf_buffer:PASS:skel_load 0 nsec test_perf_buffer:PASS:attach_kprobe 0 nsec test_perf_buffer:PASS:perf_buf__new 0 nsec test_perf_buffer:PASS:epoll_fd 0 nsec skipping offline CPU #24 skipping offline CPU #25 skipping offline CPU #26 skipping offline CPU #27 skipping offline CPU #28 skipping offline CPU #29 skipping offline CPU #30 skipping offline CPU #31 test_perf_buffer:PASS:perf_buffer__poll 0 nsec test_perf_buffer:PASS:seen_cpu_cnt 0 nsec test_perf_buffer:FAIL:buf_cnt got 24, expected 32 Summary: 0/0 PASSED, 0 SKIPPED, 1 FAILED Changing the test to check online cpus instead of possible. 
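For reference, the online CPU count comes from the range list in /sys/devices/system/cpu/online (the test already parses this mask into the online[]/on_len pair used in its loop); a minimal standalone sketch of that parsing:

  #include <stdio.h>

  /* parse a "0-23,32-47" style range list and count the CPUs in it */
  static int count_online_cpus(void)
  {
          FILE *f = fopen("/sys/devices/system/cpu/online", "r");
          int lo, hi, sep, n = 0;

          if (!f)
                  return -1;
          while (fscanf(f, "%d", &lo) == 1) {
                  hi = lo;
                  sep = fgetc(f);
                  if (sep == '-' && fscanf(f, "%d", &hi) == 1)
                          sep = fgetc(f);
                  n += hi - lo + 1;
                  if (sep != ',')
                          break;
          }
          fclose(f);
          return n;
  }
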
Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211021114132.8196-2-jolsa@kernel.org --- tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 6979aff4aab2..877600392851 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -107,8 +107,8 @@ void serial_test_perf_buffer(void) "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen))) goto out_free_pb; - if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt", - "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus)) + if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt", + "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus)) goto out_close; for (i = 0; i < nr_cpus; i++) { -- cgit v1.2.3 From aa274f98b269b2babf37b6308f8a1a009ca4fc41 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 21 Oct 2021 13:41:31 +0200 Subject: selftests/bpf: Fix possible/online index mismatch in perf_buffer test The perf_buffer fails on system with offline cpus: # test_progs -t perf_buffer serial_test_perf_buffer:PASS:nr_cpus 0 nsec serial_test_perf_buffer:PASS:nr_on_cpus 0 nsec serial_test_perf_buffer:PASS:skel_load 0 nsec serial_test_perf_buffer:PASS:attach_kprobe 0 nsec serial_test_perf_buffer:PASS:perf_buf__new 0 nsec serial_test_perf_buffer:PASS:epoll_fd 0 nsec skipping offline CPU #4 serial_test_perf_buffer:PASS:perf_buffer__poll 0 nsec serial_test_perf_buffer:PASS:seen_cpu_cnt 0 nsec serial_test_perf_buffer:PASS:buf_cnt 0 nsec ... serial_test_perf_buffer:PASS:fd_check 0 nsec serial_test_perf_buffer:PASS:drain_buf 0 nsec serial_test_perf_buffer:PASS:consume_buf 0 nsec serial_test_perf_buffer:FAIL:cpu_seen cpu 5 not seen #88 perf_buffer:FAIL Summary: 0/0 PASSED, 0 SKIPPED, 1 FAILED If the offline cpu is from the middle of the possible set, we get mismatch with possible and online cpu buffers. The perf buffer test calls perf_buffer__consume_buffer for all 'possible' cpus, but the library holds only 'online' cpu buffers and perf_buffer__consume_buffer returns them based on index. Adding extra (online) index to keep track of online buffers, we need the original (possible) index to trigger trace on proper cpu. 
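In other words, the loop ends up with two counters (condensed sketch of the change below): i walks the possible-CPU space and picks the CPU to trigger the tracepoint on, while j advances only for online CPUs and indexes the perf buffer:

  for (i = 0, j = 0; i < nr_cpus; i++) {
          if (i >= on_len || !online[i])
                  continue;                        /* offline: no buffer allocated */
          fd = perf_buffer__buffer_fd(pb, j);      /* online-buffer index */
          trigger_on_cpu(i);                       /* possible-CPU index */
          perf_buffer__consume_buffer(pb, j);
          j++;
  }
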
Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211021114132.8196-3-jolsa@kernel.org --- tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 877600392851..0b0cd045979b 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -45,7 +45,7 @@ int trigger_on_cpu(int cpu) void serial_test_perf_buffer(void) { - int err, on_len, nr_on_cpus = 0, nr_cpus, i; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, j; struct perf_buffer_opts pb_opts = {}; struct test_perf_buffer *skel; cpu_set_t cpu_seen; @@ -111,15 +111,15 @@ void serial_test_perf_buffer(void) "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus)) goto out_close; - for (i = 0; i < nr_cpus; i++) { + for (i = 0, j = 0; i < nr_cpus; i++) { if (i >= on_len || !online[i]) continue; - fd = perf_buffer__buffer_fd(pb, i); + fd = perf_buffer__buffer_fd(pb, j); CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd); last_fd = fd; - err = perf_buffer__consume_buffer(pb, i); + err = perf_buffer__consume_buffer(pb, j); if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err)) goto out_close; @@ -127,12 +127,13 @@ void serial_test_perf_buffer(void) if (trigger_on_cpu(i)) goto out_close; - err = perf_buffer__consume_buffer(pb, i); - if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err)) + err = perf_buffer__consume_buffer(pb, j); + if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err)) goto out_close; if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i)) goto out_close; + j++; } out_free_pb: -- cgit v1.2.3 From 99d099757ab487e0d317c69541b47aaae0b6c431 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 21 Oct 2021 13:41:32 +0200 Subject: selftests/bpf: Use nanosleep tracepoint in perf buffer test The perf buffer tests triggers trace with nanosleep syscall, but monitors all syscalls, which results in lot of data in the buffer and makes it harder to debug. Let's lower the trace traffic and monitor just nanosleep syscall. Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211021114132.8196-4-jolsa@kernel.org --- tools/testing/selftests/bpf/progs/test_perf_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index d37ce29fd393..a08874c5bdf2 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -12,7 +12,7 @@ struct { __type(value, int); } perf_buf_map SEC(".maps"); -SEC("tp/raw_syscalls/sys_enter") +SEC("tp/syscalls/sys_enter_nanosleep") int handle_sys_enter(void *ctx) { int cpu = bpf_get_smp_processor_id(); -- cgit v1.2.3 From 30c5bd96476ced0d3e08372be5186ff7f421d10c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 20 Oct 2021 18:44:02 -0700 Subject: selftests/bpf: Demonstrate use of custom .rodata/.data sections Enhance existing selftests to demonstrate the use of custom .data/.rodata sections. 
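A minimal sketch of the pattern being exercised (variable names are illustrative; the actual additions below use the .rodata.dyn, .data.dyn and .data.read_mostly sections):

  /* BPF side: place globals into custom datasec names */
  const volatile int cfg_sz SEC(".rodata.dyn") = 0;
  int results[4] SEC(".data.dyn");

  /* user-space side: each custom section becomes its own skeleton member
   * backed by a map named after the ELF section
   */
  skel->rodata_dyn->cfg_sz = 4;              /* settable before __load() */
  skel->data_dyn->results[0];                /* readable/writable afterwards */
  bpf_map__name(skel->maps.rodata_dyn);      /* ".rodata.dyn" */
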
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211021014404.2635234-9-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/skeleton.c | 29 +++++++++++++++++++++++ tools/testing/selftests/bpf/progs/test_skeleton.c | 18 ++++++++++++++ 2 files changed, 47 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index fe1e204a65c6..180afd632f4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -16,10 +16,13 @@ void test_skeleton(void) struct test_skeleton* skel; struct test_skeleton__bss *bss; struct test_skeleton__data *data; + struct test_skeleton__data_dyn *data_dyn; struct test_skeleton__rodata *rodata; + struct test_skeleton__rodata_dyn *rodata_dyn; struct test_skeleton__kconfig *kcfg; const void *elf_bytes; size_t elf_bytes_sz = 0; + int i; skel = test_skeleton__open(); if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -30,7 +33,12 @@ void test_skeleton(void) bss = skel->bss; data = skel->data; + data_dyn = skel->data_dyn; rodata = skel->rodata; + rodata_dyn = skel->rodata_dyn; + + ASSERT_STREQ(bpf_map__name(skel->maps.rodata_dyn), ".rodata.dyn", "rodata_dyn_name"); + ASSERT_STREQ(bpf_map__name(skel->maps.data_dyn), ".data.dyn", "data_dyn_name"); /* validate values are pre-initialized correctly */ CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1); @@ -46,6 +54,12 @@ void test_skeleton(void) CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 0, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], -(i + 1), "in_dynarr"); + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 1, "out_dynarr"); + /* validate we can pre-setup global variables, even in .bss */ data->in1 = 10; data->in2 = 11; @@ -53,6 +67,10 @@ void test_skeleton(void) bss->in4 = 13; rodata->in.in6 = 14; + rodata_dyn->in_dynarr_sz = 4; + for (i = 0; i < 4; i++) + rodata_dyn->in_dynarr[i] = i + 10; + err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) goto cleanup; @@ -64,6 +82,10 @@ void test_skeleton(void) CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 4, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], i + 10, "in_dynarr"); + /* now set new values and attach to get them into outX variables */ data->in1 = 1; data->in2 = 2; @@ -73,6 +95,8 @@ void test_skeleton(void) bss->in5.b = 6; kcfg = skel->kconfig; + skel->data_read_mostly->read_mostly_var = 123; + err = test_skeleton__attach(skel); if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) goto cleanup; @@ -93,6 +117,11 @@ void test_skeleton(void) CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2", "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION); + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 10, "out_dynarr"); + + ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var"); + elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz); ASSERT_OK_PTR(elf_bytes, "elf_bytes"); ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz"); diff --git 
a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 441fa1c552c8..1b1187d2967b 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -5,6 +5,8 @@ #include #include +#define __read_mostly SEC(".data.read_mostly") + struct s { int a; long long b; @@ -40,9 +42,20 @@ int kern_ver = 0; struct s out5 = {}; + +const volatile int in_dynarr_sz SEC(".rodata.dyn"); +const volatile int in_dynarr[4] SEC(".rodata.dyn") = { -1, -2, -3, -4 }; + +int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 }; + +int read_mostly_var __read_mostly; +int out_mostly_var; + SEC("raw_tp/sys_enter") int handler(const void *ctx) { + int i; + out1 = in1; out2 = in2; out3 = in3; @@ -53,6 +66,11 @@ int handler(const void *ctx) bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; + for (i = 0; i < in_dynarr_sz; i++) + out_dynarr[i] = in_dynarr[i]; + + out_mostly_var = read_mostly_var; + return 0; } -- cgit v1.2.3 From 4f2511e1990985103929ab799fb3ebca81969b77 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 20 Oct 2021 18:44:04 -0700 Subject: selftests/bpf: Switch to ".bss"/".rodata"/".data" lookups for internal maps Utilize libbpf's feature of allowing to lookup internal maps by their ELF section names. No need to guess or calculate the exact truncated prefix taken from the object name. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211021014404.2635234-11-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/core_autosize.c | 2 +- tools/testing/selftests/bpf/prog_tests/core_reloc.c | 2 +- tools/testing/selftests/bpf/prog_tests/global_data.c | 11 +++++++++-- tools/testing/selftests/bpf/prog_tests/global_data_init.c | 2 +- tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 2 +- tools/testing/selftests/bpf/prog_tests/rdonly_maps.c | 2 +- 6 files changed, 14 insertions(+), 7 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 3d4b2a358d47..2a0dac6394ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -163,7 +163,7 @@ void test_core_autosize(void) usleep(1); - bss_map = bpf_object__find_map_by_name(skel->obj, "test_cor.bss"); + bss_map = bpf_object__find_map_by_name(skel->obj, ".bss"); if (!ASSERT_OK_PTR(bss_map, "bss_map_find")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 763302e63a29..cc50f8feeca3 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -867,7 +867,7 @@ void test_core_reloc(void) goto cleanup; } - data_map = bpf_object__find_map_by_name(obj, "test_cor.bss"); + data_map = bpf_object__find_map_by_name(obj, ".bss"); if (CHECK(!data_map, "find_data_map", "data map not found\n")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 9efa7e50eab2..afd8639f9a94 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -103,11 +103,18 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration) static void test_global_data_rdonly(struct bpf_object *obj, __u32 
duration) { int err = -ENOMEM, map_fd, zero = 0; - struct bpf_map *map; + struct bpf_map *map, *map2; __u8 *buff; map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); - if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) + if (!ASSERT_OK_PTR(map, "map")) + return; + if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal")) + return; + + /* ensure we can lookup internal maps by their ELF names */ + map2 = bpf_object__find_map_by_name(obj, ".rodata"); + if (!ASSERT_EQ(map, map2, "same_maps")) return; map_fd = bpf_map__fd(map); diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c index ee46b11f1f9a..1db86eab101b 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -16,7 +16,7 @@ void test_global_data_init(void) if (CHECK_FAIL(err)) return; - map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); + map = bpf_object__find_map_by_name(obj, ".rodata"); if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 032a322d51f2..01e51d16c8b8 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -93,7 +93,7 @@ void serial_test_kfree_skb(void) if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) goto close_prog; - global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss"); + global_data = bpf_object__find_map_by_name(obj2, ".bss"); if (CHECK(!global_data, "find global data", "not found\n")) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c index 5f9eaa3ab584..fd5d2ddfb062 100644 --- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -37,7 +37,7 @@ void test_rdonly_maps(void) if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) goto cleanup; - bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss"); + bss_map = bpf_object__find_map_by_name(obj, ".bss"); if (CHECK(!bss_map, "find_bss_map", "failed\n")) goto cleanup; -- cgit v1.2.3 From 57385ae31ff0ffa6e9c9ae39206740efdc7f5972 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 22 Oct 2021 13:13:42 -0700 Subject: selftests/bpf: Make perf_buffer selftests work on 4.9 kernel again Recent change to use tp/syscalls/sys_enter_nanosleep for perf_buffer selftests causes this selftest to fail on 4.9 kernel in libbpf CI ([0]): libbpf: prog 'handle_sys_enter': failed to attach to perf_event FD 6: Invalid argument libbpf: prog 'handle_sys_enter': failed to attach to tracepoint 'syscalls/sys_enter_nanosleep': Invalid argument It's not exactly clear why, because perf_event itself is created for this tracepoint, but I can't even compile 4.9 kernel locally, so it's hard to figure this out. If anyone has better luck and would like to help investigating this, I'd really appreciate this. For now, unblock CI by switching back to raw_syscalls/sys_enter, but reduce amount of unnecessary samples emitted by filter by process ID. Use explicit ARRAY map for that to make it work on 4.9 as well, because global data isn't yet supported there. 
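For contrast, on kernels with global data support the PID filter would normally just be a plain global instead of an explicit map; a sketch of the variant that 4.9 cannot load:

  /* .bss-backed global; the map behind it cannot be created on 4.9,
   * hence the one-element BPF_MAP_TYPE_ARRAY used below instead
   */
  int my_pid = 0;                    /* set from user space via the skeleton */

  SEC("tp/raw_syscalls/sys_enter")
  int handle_sys_enter(void *ctx)
  {
          if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
                  return 1;
          /* ... bpf_perf_event_output() as in the program below ... */
          return 1;
  }
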
Fixes: aa274f98b269 ("selftests/bpf: Fix possible/online index mismatch in perf_buffer test") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211022201342.3490692-1-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 5 +++++ tools/testing/selftests/bpf/progs/test_perf_buffer.c | 20 ++++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 0b0cd045979b..4e32f3586a75 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -46,6 +46,7 @@ int trigger_on_cpu(int cpu) void serial_test_perf_buffer(void) { int err, on_len, nr_on_cpus = 0, nr_cpus, i, j; + int zero = 0, my_pid = getpid(); struct perf_buffer_opts pb_opts = {}; struct test_perf_buffer *skel; cpu_set_t cpu_seen; @@ -71,6 +72,10 @@ void serial_test_perf_buffer(void) if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; + err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0); + if (!ASSERT_OK(err, "my_pid_update")) + goto out_close; + /* attach probe */ err = test_perf_buffer__attach(skel); if (CHECK(err, "attach_kprobe", "err %d\n", err)) diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index a08874c5bdf2..17d5b67744d5 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -6,20 +6,36 @@ #include #include +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} my_pid_map SEC(".maps"); + struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __type(key, int); __type(value, int); } perf_buf_map SEC(".maps"); -SEC("tp/syscalls/sys_enter_nanosleep") +SEC("tp/raw_syscalls/sys_enter") int handle_sys_enter(void *ctx) { + int zero = 0, *my_pid, cur_pid; int cpu = bpf_get_smp_processor_id(); + my_pid = bpf_map_lookup_elem(&my_pid_map, &zero); + if (!my_pid) + return 1; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != *my_pid) + return 1; + bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU, &cpu, sizeof(cpu)); - return 0; + return 1; } char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 487ef148cf17730442444c07a4c56f16578ec73e Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Fri, 22 Oct 2021 21:06:23 +0800 Subject: selftests/bpf: Switch to new btf__type_cnt/btf__raw_data APIs Replace the calls to btf__get_nr_types/btf__get_raw_data in selftests with new APIs btf__type_cnt/btf__raw_data. The old APIs will be deprecated in libbpf v0.7+. 
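The conversion is mechanical, but the loop bound flips from <= to < because btf__type_cnt() counts the implicit 'void' type at ID 0 and therefore returns one more than btf__get_nr_types() did:

  /* old: for (id = 1; id <= btf__get_nr_types(btf); id++) */
  for (id = 1; id < btf__type_cnt(btf); id++)
          btf_dump__dump_type(d, id);
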
Signed-off-by: Hengqi Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211022130623.1548429-6-hengqi.chen@gmail.com --- tools/testing/selftests/bpf/btf_helpers.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/btf.c | 10 +++++----- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/btf_endian.c | 12 ++++++------ tools/testing/selftests/bpf/prog_tests/btf_split.c | 2 +- tools/testing/selftests/bpf/prog_tests/core_autosize.c | 2 +- tools/testing/selftests/bpf/prog_tests/core_reloc.c | 2 +- tools/testing/selftests/bpf/prog_tests/resolve_btfids.c | 4 ++-- 8 files changed, 22 insertions(+), 22 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index 668cfa20bb1b..b5b6b013a245 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -215,7 +215,7 @@ int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[]) int i; bool ok = true; - ASSERT_EQ(btf__get_nr_types(btf), nr_types, "btf_nr_types"); + ASSERT_EQ(btf__type_cnt(btf) - 1, nr_types, "btf_nr_types"); for (i = 1; i <= nr_types; i++) { if (!ASSERT_STREQ(btf_type_raw_dump(btf, i), exp_types[i - 1], "raw_dump")) @@ -254,7 +254,7 @@ const char *btf_type_c_dump(const struct btf *btf) return NULL; } - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); if (err) { fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err); diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index fa67f25bbef5..557f948f9964 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -7274,8 +7274,8 @@ static void do_test_dedup(unsigned int test_num) goto done; } - test_btf_data = btf__get_raw_data(test_btf, &test_btf_size); - expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size); + test_btf_data = btf__raw_data(test_btf, &test_btf_size); + expect_btf_data = btf__raw_data(expect_btf, &expect_btf_size); if (CHECK(test_btf_size != expect_btf_size, "test_btf_size:%u != expect_btf_size:%u", test_btf_size, expect_btf_size)) { @@ -7329,8 +7329,8 @@ static void do_test_dedup(unsigned int test_num) expect_str_cur += expect_len + 1; } - test_nr_types = btf__get_nr_types(test_btf); - expect_nr_types = btf__get_nr_types(expect_btf); + test_nr_types = btf__type_cnt(test_btf); + expect_nr_types = btf__type_cnt(expect_btf); if (CHECK(test_nr_types != expect_nr_types, "test_nr_types:%u != expect_nr_types:%u", test_nr_types, expect_nr_types)) { @@ -7338,7 +7338,7 @@ static void do_test_dedup(unsigned int test_num) goto done; } - for (i = 1; i <= test_nr_types; i++) { + for (i = 1; i < test_nr_types; i++) { const struct btf_type *test_type, *expect_type; int test_size, expect_size; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 12f457b6786d..3d837a7019fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -27,7 +27,7 @@ static struct btf_dump_test_case { static int btf_dump_all_types(const struct btf *btf, const struct btf_dump_opts *opts) { - size_t type_cnt = btf__get_nr_types(btf); + size_t type_cnt = btf__type_cnt(btf); struct btf_dump *d; int err = 0, id; @@ -36,7 +36,7 @@ static int btf_dump_all_types(const struct btf *btf, 
if (err) return err; - for (id = 1; id <= type_cnt; id++) { + for (id = 1; id < type_cnt; id++) { err = btf_dump__dump_type(d, id); if (err) goto done; @@ -171,7 +171,7 @@ void test_btf_dump_incremental(void) err = btf__add_field(btf, "x", 2, 0, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } @@ -210,7 +210,7 @@ void test_btf_dump_incremental(void) err = btf__add_field(btf, "s", 3, 32, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c index 8ab5d3e358dd..2653cc482df4 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -32,7 +32,7 @@ void test_btf_endian() { ASSERT_EQ(btf__endianness(btf), swap_endian, "endian"); /* Get raw BTF data in non-native endianness... */ - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) goto err_out; @@ -42,9 +42,9 @@ void test_btf_endian() { goto err_out; ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); - ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); - swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz); + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) goto err_out; @@ -58,7 +58,7 @@ void test_btf_endian() { /* swap it back to native endianness */ btf__set_endianness(swap_btf, endian); - swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz); + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) goto err_out; @@ -75,7 +75,7 @@ void test_btf_endian() { swap_btf = NULL; btf__set_endianness(btf, swap_endian); - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) goto err_out; @@ -85,7 +85,7 @@ void test_btf_endian() { goto err_out; ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); - ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); /* the type should appear as if it was stored in native endianness */ t = btf__type_by_id(swap_btf, var_id); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c index ca7c2a91610a..b1ffe61f2aa9 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -72,7 +72,7 @@ void test_btf_split() { d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf); if (!ASSERT_OK_PTR(d, "btf_dump__new")) goto cleanup; - for (i = 1; i <= btf__get_nr_types(btf2); i++) { + for (i = 1; i < btf__type_cnt(btf2); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 2a0dac6394ef..1dfe14ff6aa4 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ 
b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -112,7 +112,7 @@ void test_core_autosize(void) if (!ASSERT_OK_PTR(f, "btf_fdopen")) goto cleanup; - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data")) goto cleanup; written = fwrite(raw_data, 1, raw_sz, f); diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index cc50f8feeca3..55ec85ba7375 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -381,7 +381,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test) exp->local_anon_void_ptr = -1; exp->local_anon_arr = -1; - for (i = 1; i <= btf__get_nr_types(local_btf); i++) + for (i = 1; i < btf__type_cnt(local_btf); i++) { t = btf__type_by_id(local_btf, i); /* we are interested only in anonymous types */ diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index f62361306f6d..badda6309fd9 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -106,9 +106,9 @@ static int resolve_symbols(void) "Failed to load BTF from btf_data.o\n")) return -1; - nr = btf__get_nr_types(btf); + nr = btf__type_cnt(btf); - for (type_id = 1; type_id <= nr; type_id++) { + for (type_id = 1; type_id < nr; type_id++) { if (__resolve_symbol(btf, type_id)) break; } -- cgit v1.2.3 From d1321207b176b0a5a8004b371ade5a0d8dec7732 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 21 Oct 2021 14:48:14 -0700 Subject: selftests/bpf: Fix flow dissector tests - update custom loader to search by name, not section name - update bpftool commands to use proper pin path Signed-off-by: Stanislav Fomichev Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211021214814.1236114-4-sdf@google.com --- tools/testing/selftests/bpf/flow_dissector_load.c | 18 +++++++++++------- tools/testing/selftests/bpf/flow_dissector_load.h | 10 ++-------- tools/testing/selftests/bpf/test_flow_dissector.sh | 10 +++++----- 3 files changed, 18 insertions(+), 20 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c index 3fd83b9dc1bf..87fd1aa323a9 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.c +++ b/tools/testing/selftests/bpf/flow_dissector_load.c @@ -17,7 +17,7 @@ const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector"; const char *cfg_map_name = "jmp_table"; bool cfg_attach = true; -char *cfg_section_name; +char *cfg_prog_name; char *cfg_path_name; static void load_and_attach_program(void) @@ -25,7 +25,11 @@ static void load_and_attach_program(void) int prog_fd, ret; struct bpf_object *obj; - ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name, + ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + if (ret) + error(1, 0, "failed to enable libbpf strict mode: %d", ret); + + ret = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name, cfg_map_name, NULL, &prog_fd, NULL); if (ret) error(1, 0, "bpf_flow_load %s", cfg_path_name); @@ -75,15 +79,15 @@ static void parse_opts(int argc, char **argv) break; case 'p': if (cfg_path_name) - error(1, 0, "only one prog name can be given"); + error(1, 0, "only one path can be given"); cfg_path_name = optarg; break; case 's': - if (cfg_section_name) - error(1, 0, "only one section can be given"); 
+ if (cfg_prog_name) + error(1, 0, "only one prog can be given"); - cfg_section_name = optarg; + cfg_prog_name = optarg; break; } } @@ -94,7 +98,7 @@ static void parse_opts(int argc, char **argv) if (cfg_attach && !cfg_path_name) error(1, 0, "must provide a path to the BPF program"); - if (cfg_attach && !cfg_section_name) + if (cfg_attach && !cfg_prog_name) error(1, 0, "must provide a section name"); } diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h index 7290401ec172..9d0acc2fc6cc 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.h +++ b/tools/testing/selftests/bpf/flow_dissector_load.h @@ -7,7 +7,7 @@ static inline int bpf_flow_load(struct bpf_object **obj, const char *path, - const char *section_name, + const char *prog_name, const char *map_name, const char *keys_map_name, int *prog_fd, @@ -23,13 +23,7 @@ static inline int bpf_flow_load(struct bpf_object **obj, if (ret) return ret; - main_prog = NULL; - bpf_object__for_each_program(prog, *obj) { - if (strcmp(section_name, bpf_program__section_name(prog)) == 0) { - main_prog = prog; - break; - } - } + main_prog = bpf_object__find_program_by_name(*obj, prog_name); if (!main_prog) return -1; diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh index 174b72a64a4c..dbd91221727d 100755 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ b/tools/testing/selftests/bpf/test_flow_dissector.sh @@ -26,22 +26,22 @@ if [[ -z $(ip netns identify $$) ]]; then type flow_dissector if ! unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Unexpected unsuccessful attach in namespace" >&2 err=1 fi - $bpftool prog attach pinned /sys/fs/bpf/flow/flow_dissector \ + $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \ flow_dissector if unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Unexpected successful attach in namespace" >&2 err=1 fi if ! $bpftool prog detach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Failed to detach flow dissector" >&2 err=1 fi @@ -95,7 +95,7 @@ else fi # Attach BPF program -./flow_dissector_load -p bpf_flow.o -s flow_dissector +./flow_dissector_load -p bpf_flow.o -s _dissect # Setup tc qdisc add dev lo ingress -- cgit v1.2.3 From 9d19a12b02bf009fefc3620234b4297e4bd7c5d5 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 21 Oct 2021 12:56:33 -0700 Subject: selftests/bpf: Add BTF_KIND_DECL_TAG typedef unit tests Test good and bad variants of typedef BTF_KIND_DECL_TAG encoding. 
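For reference, the C-level shape that produces the well-formed encoding below is roughly the following (a sketch, not taken from this patch; the type and tag names mirror the raw test's string section). For a typedef the tag applies to the declaration as a whole, so the emitted BTF_KIND_DECL_TAG must carry component_idx == -1; a non-negative index only makes sense for struct/union members or function arguments and is rejected by the verifier of the BTF blob.

  /* sketch: clang emits BTF_KIND_DECL_TAG -> typedef with component_idx = -1 */
  typedef int t __attribute__((btf_decl_tag("tag")));
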
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211021195633.4019472-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf.c | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 557f948f9964..8257ee0af14d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3903,6 +3903,42 @@ static struct btf_raw_test raw_tests[] = { .btf_load_err = true, .err_str = "Invalid component_idx", }, +{ + .descr = "decl_tag test #13, typedef, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #14, typedef, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, }; /* struct btf_raw_test raw_tests[] */ -- cgit v1.2.3 From 557c8c480401457d885bf7a82221dcc877692aa7 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 21 Oct 2021 12:56:38 -0700 Subject: selftests/bpf: Test deduplication for BTF_KIND_DECL_TAG typedef Add unit tests for deduplication of BTF_KIND_DECL_TAG to typedef types. Also changed a few comments from "tag" to "decl_tag" to match BTF_KIND_DECL_TAG enum value name. 
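In C terms the new dedup case corresponds roughly to the sketch below (illustrative only; the names match the raw test's string section): two inputs that both carry the same tagged typedef should collapse into a single typedef, with all distinct tags re-pointed at the survivor.

  /* input 1 */ typedef int t __attribute__((btf_decl_tag("tag1"))) __attribute__((btf_decl_tag("tag2")));
  /* input 2 */ typedef int t __attribute__((btf_decl_tag("tag1"))) __attribute__((btf_decl_tag("tag3")));
  /* expected after dedup: a single 'typedef int t', referenced by decl_tags
   * tag1, tag2 and tag3 (the duplicate tag1 instances are merged as well)
   */
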
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211021195638.4019770-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf.c | 47 ++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 8257ee0af14d..ac596cb06e40 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -6877,11 +6877,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ - BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ - BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), }, .expect = { .raw_types = { @@ -6905,11 +6906,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ - BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ - BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), }, .opts = { .dont_resolve_fwds = false, @@ -7204,6 +7206,39 @@ const struct btf_dedup_test dedup_tests[] = { .dont_resolve_fwds = false, }, }, +{ + .descr = "dedup: typedef tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [3] */ + /* tag -> t: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [5] */ + /* tag -> t: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, + .opts = { + .dont_resolve_fwds = false, + }, +}, }; -- cgit v1.2.3 From 8c18ea2d2c2913c80d8700c8bc8fe8568b8650a1 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 21 Oct 2021 12:56:43 -0700 Subject: selftests/bpf: Add BTF_KIND_DECL_TAG typedef example in tag.c Change value type in progs/tag.c to a typedef with a btf_decl_tag. With `bpftool btf dump file tag.o`, we have ... [14] TYPEDEF 'value_t' type_id=17 [15] DECL_TAG 'tag1' type_id=14 component_idx=-1 [16] DECL_TAG 'tag2' type_id=14 component_idx=-1 [17] STRUCT '(anon)' size=8 vlen=2 'a' type_id=2 bits_offset=0 'b' type_id=2 bits_offset=32 ... 
The btf_tag selftest also succeeded: $ ./test_progs -t tag #21 btf_tag:OK Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211021195643.4020315-1-yhs@fb.com --- tools/testing/selftests/bpf/progs/tag.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/tag.c b/tools/testing/selftests/bpf/progs/tag.c index 672d19e7b120..1792f4eda095 100644 --- a/tools/testing/selftests/bpf/progs/tag.c +++ b/tools/testing/selftests/bpf/progs/tag.c @@ -24,18 +24,23 @@ struct key_t { int c; } __tag1 __tag2; +typedef struct { + int a; + int b; +} value_t __tag1 __tag2; + struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 3); __type(key, struct key_t); - __type(value, __u64); + __type(value, value_t); } hashmap1 SEC(".maps"); static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2 { struct key_t key; - __u64 val = 1; + value_t val = {}; key.a = key.b = key.c = x; bpf_map_update_elem(&hashmap1, &key, &val, 0); -- cgit v1.2.3 From 6972dc3b8778ce0d9ce819c6f1e3d32ce2bc3dd9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 22 Oct 2021 15:32:25 -0700 Subject: selftests/bpf: Normalize selftest entry points Ensure that all test entry points are global void functions with no input arguments. Mark few subtest entry points as static. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211022223228.99920-2-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 2 +- tools/testing/selftests/bpf/prog_tests/resolve_btfids.c | 10 ++++------ tools/testing/selftests/bpf/prog_tests/signal_pending.c | 2 +- tools/testing/selftests/bpf/prog_tests/snprintf.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c | 4 ++-- 6 files changed, 13 insertions(+), 15 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 3d837a7019fd..aa76360d8f49 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -133,7 +133,7 @@ static char *dump_buf; static size_t dump_buf_sz; static FILE *dump_buf_file; -void test_btf_dump_incremental(void) +static void test_btf_dump_incremental(void) { struct btf *btf = NULL; struct btf_dump *d = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index badda6309fd9..f4a13d9dd5c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -117,14 +117,14 @@ static int resolve_symbols(void) return 0; } -int test_resolve_btfids(void) +void test_resolve_btfids(void) { __u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; unsigned int i, j; int ret = 0; if (resolve_symbols()) - return -1; + return; /* Check BTF_ID_LIST(test_list_local) and * BTF_ID_LIST_GLOBAL(test_list_global) IDs @@ -138,7 +138,7 @@ int test_resolve_btfids(void) test_symbols[i].name, test_list[i], test_symbols[i].id); if (ret) - return ret; + return; } } @@ -161,9 +161,7 @@ int test_resolve_btfids(void) if (i > 0) { if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check")) - return -1; + return; } } - - return ret; } diff --git 
a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c index dfcbddcbe4d3..fdfdcff6cbef 100644 --- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -42,7 +42,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type) signal(SIGALRM, SIG_DFL); } -void test_signal_pending(enum bpf_prog_type prog_type) +void test_signal_pending(void) { test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER); test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR); diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index 8fd1b4b29a0e..394ebfc3bbf3 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -33,7 +33,7 @@ #define EXP_NO_BUF_RET 29 -void test_snprintf_positive(void) +static void test_snprintf_positive(void) { char exp_addr_out[] = EXP_ADDR_OUT; char exp_sym_out[] = EXP_SYM_OUT; @@ -103,7 +103,7 @@ static int load_single_snprintf(char *fmt) return ret; } -void test_snprintf_negative(void) +static void test_snprintf_negative(void) { ASSERT_OK(load_single_snprintf("valid %d"), "valid usage"); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index d5c98f2cb12f..f529e3c923ae 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -2,7 +2,7 @@ #include #include -void test_xdp_adjust_tail_shrink(void) +static void test_xdp_adjust_tail_shrink(void) { const char *file = "./test_xdp_adjust_tail_shrink.o"; __u32 duration, retval, size, expect_sz; @@ -30,7 +30,7 @@ void test_xdp_adjust_tail_shrink(void) bpf_object__close(obj); } -void test_xdp_adjust_tail_grow(void) +static void test_xdp_adjust_tail_grow(void) { const char *file = "./test_xdp_adjust_tail_grow.o"; struct bpf_object *obj; @@ -58,7 +58,7 @@ void test_xdp_adjust_tail_grow(void) bpf_object__close(obj); } -void test_xdp_adjust_tail_grow2(void) +static void test_xdp_adjust_tail_grow2(void) { const char *file = "./test_xdp_adjust_tail_grow.o"; char buf[4096]; /* avoid segfault: large buf to hold grow results */ diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index d4e9a9972a67..3079d5568f8f 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -8,7 +8,7 @@ #define IFINDEX_LO 1 -void test_xdp_with_devmap_helpers(void) +static void test_xdp_with_devmap_helpers(void) { struct test_xdp_with_devmap_helpers *skel; struct bpf_prog_info info = {}; @@ -60,7 +60,7 @@ out_close: test_xdp_with_devmap_helpers__destroy(skel); } -void test_neg_xdp_devmap_helpers(void) +static void test_neg_xdp_devmap_helpers(void) { struct test_xdp_devmap_helpers *skel; -- cgit v1.2.3 From 8ea688e7f444f1830aa4b283e1364ab9d9bf42b5 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 22 Oct 2021 15:32:26 -0700 Subject: selftests/bpf: Support multiple tests per file Revamp how test discovery works for test_progs and allow multiple test entries per file. Any global void function with no arguments and serial_test_ or test_ prefix is considered a test. 
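Concretely, a single prog_tests/*.c file may now look like the sketch below (file and test names are made up); the regenerated header picks up every matching signature instead of deriving exactly one test from the file name:

  /* prog_tests/foo.c -- hypothetical example */
  void test_foo(void) { /* ... */ }
  void test_foo_variant(void) { /* ... */ }
  void serial_test_foo_serialized(void) { /* ... */ }

  /* the generated tests.h then contains (sorted):
   * DEFINE_TEST(foo)
   * DEFINE_TEST(foo_serialized)
   * DEFINE_TEST(foo_variant)
   */
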
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211022223228.99920-3-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 498222543c37..ac47cf9760fc 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -421,10 +421,9 @@ ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),) $(TRUNNER_TESTS_DIR)-tests-hdr := y $(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c $$(call msg,TEST-HDR,$(TRUNNER_BINARY),$$@) - $$(shell ( cd $(TRUNNER_TESTS_DIR); \ - echo '/* Generated header, do not edit */'; \ - ls *.c 2> /dev/null | \ - sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \ + $$(shell (echo '/* Generated header, do not edit */'; \ + sed -n -E 's/^void (serial_)?test_([a-zA-Z0-9_]+)\((void)?\).*/DEFINE_TEST(\2)/p' \ + $(TRUNNER_TESTS_DIR)/*.c | sort ; \ ) > $$@) endif -- cgit v1.2.3 From 2c0f51ac320649402a80a6228744eb54f42a5c21 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 22 Oct 2021 15:32:27 -0700 Subject: selftests/bpf: Mark tc_redirect selftest as serial It seems to cause a lot of harm to kprobe/tracepoint selftests. Yucong mentioned before that it does manipulate sysfs, which might be the reason. So let's mark it as serial, though ideally it would be less intrusive on the system at test. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211022223228.99920-4-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index e87bc4466d9a..53672634bc52 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -769,7 +769,7 @@ static void *test_tc_redirect_run_tests(void *arg) return NULL; } -void test_tc_redirect(void) +void serial_test_tc_redirect(void) { pthread_t test_thread; int err; -- cgit v1.2.3 From 3762a39ce85feb07996f3f0390963da71874c651 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 22 Oct 2021 15:32:28 -0700 Subject: selftests/bpf: Split out bpf_verif_scale selftests into multiple tests Instead of using subtests in bpf_verif_scale selftest, turn each scale sub-test into its own test. Each subtest is compltely independent and just reuses a bit of common test running logic, so the conversion is trivial. For convenience, keep all of BPF verifier scale tests in one file. This conversion shaves off a significant amount of time when running test_progs in parallel mode. E.g., just running scale tests (-t verif_scale): BEFORE ====== Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED real 0m22.894s user 0m0.012s sys 0m22.797s AFTER ===== Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED real 0m12.044s user 0m0.024s sys 0m27.869s Ten second saving right there. test_progs -j is not yet ready to be turned on by default, unfortunately, and some tests fail almost every time, but this is a good improvement nevertheless. 
Ignoring few failures, here is sequential vs parallel run times when running all tests now: SEQUENTIAL ========== Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED real 1m5.625s user 0m4.211s sys 0m31.650s PARALLEL ======== Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED real 0m35.550s user 0m4.998s sys 0m39.890s Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211022223228.99920-5-andrii@kernel.org --- .../selftests/bpf/prog_tests/bpf_verif_scale.c | 220 ++++++++++++++------- 1 file changed, 152 insertions(+), 68 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 3d002c245d2b..867349e4ed9e 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -39,82 +39,166 @@ struct scale_test_def { bool fails; }; -void test_bpf_verif_scale(void) -{ - struct scale_test_def tests[] = { - { "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ }, - - { "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS }, - { "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS }, - { "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS }, - - { "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* full unroll by llvm */ - { "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* partial unroll. llvm will unroll loop ~150 times. - * C loop count -> 600. - * Asm loop count -> 4. - * 16k insns in loop body. - * Total of 5 such loops. Total program size ~82k insns. - */ - { "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* no unroll at all. - * C loop count -> 600. - * ASM loop count -> 600. - * ~110 insns in loop body. - * Total of 5 such loops. Total program size ~1500 insns. - */ - { "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - { "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "loop4.o", BPF_PROG_TYPE_SCHED_CLS }, - { "loop5.o", BPF_PROG_TYPE_SCHED_CLS }, - { "loop6.o", BPF_PROG_TYPE_KPROBE }, - - /* partial unroll. 19k insn in a loop. - * Total program size 20.8k insn. 
- * ~350k processed_insns - */ - { "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* no unroll, tiny loops */ - { "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* non-inlined subprogs */ - { "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - { "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, - { "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, - - { "test_xdp_loop.o", BPF_PROG_TYPE_XDP }, - { "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL }, - }; +static void scale_test(const char *file, + enum bpf_prog_type attach_type, + bool should_fail) +{ libbpf_print_fn_t old_print_fn = NULL; - int err, i; + int err; if (env.verifier_stats) { test__force_log(); old_print_fn = libbpf_set_print(libbpf_debug_print); } - for (i = 0; i < ARRAY_SIZE(tests); i++) { - const struct scale_test_def *test = &tests[i]; - - if (!test__start_subtest(test->file)) - continue; - - err = check_load(test->file, test->attach_type); - CHECK_FAIL(err && !test->fails); - } + err = check_load(file, attach_type); + if (should_fail) + ASSERT_ERR(err, "expect_error"); + else + ASSERT_OK(err, "expect_success"); if (env.verifier_stats) libbpf_set_print(old_print_fn); } + +void test_verif_scale1() +{ + scale_test("test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale2() +{ + scale_test("test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale3() +{ + scale_test("test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_pyperf_global() +{ + scale_test("pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf_subprogs() +{ + scale_test("pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf50() +{ + /* full unroll by llvm */ + scale_test("pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf100() +{ + /* full unroll by llvm */ + scale_test("pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf180() +{ + /* full unroll by llvm */ + scale_test("pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600() +{ + /* partial unroll. llvm will unroll loop ~150 times. + * C loop count -> 600. + * Asm loop count -> 4. + * 16k insns in loop body. + * Total of 5 such loops. Total program size ~82k insns. + */ + scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600_nounroll() +{ + /* no unroll at all. + * C loop count -> 600. + * ASM loop count -> 600. + * ~110 insns in loop body. + * Total of 5 such loops. Total program size ~1500 insns. + */ + scale_test("pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop1() +{ + scale_test("loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop2() +{ + scale_test("loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop3_fail() +{ + scale_test("loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */); +} + +void test_verif_scale_loop4() +{ + scale_test("loop4.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop5() +{ + scale_test("loop5.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop6() +{ + scale_test("loop6.o", BPF_PROG_TYPE_KPROBE, false); +} + +void test_verif_scale_strobemeta() +{ + /* partial unroll. 19k insn in a loop. + * Total program size 20.8k insn. 
+ * ~350k processed_insns + */ + scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll1() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll2() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_subprogs() +{ + /* non-inlined subprogs */ + scale_test("strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_sysctl_loop1() +{ + scale_test("test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_sysctl_loop2() +{ + scale_test("test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_xdp_loop() +{ + scale_test("test_xdp_loop.o", BPF_PROG_TYPE_XDP, false); +} + +void test_verif_scale_seg6_loop() +{ + scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); +} -- cgit v1.2.3 From 06fca841fb64c9ed499a3575a530014268d0251a Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 26 Oct 2021 03:08:28 +0200 Subject: selftests/bpf: Use __BYTE_ORDER__ Use the compiler-defined __BYTE_ORDER__ instead of the libc-defined __BYTE_ORDER for consistency. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211026010831.748682-4-iii@linux.ibm.com --- tools/testing/selftests/bpf/prog_tests/btf_endian.c | 6 +++--- tools/testing/selftests/bpf/test_sysctl.c | 4 ++-- tools/testing/selftests/bpf/verifier/ctx_skb.c | 14 +++++++------- tools/testing/selftests/bpf/verifier/lwt.c | 2 +- .../selftests/bpf/verifier/perf_event_sample_period.c | 6 +++--- 5 files changed, 16 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c index 2653cc482df4..8afbf3d0b89a 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -7,12 +7,12 @@ #include void test_btf_endian() { -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ enum btf_endianness endian = BTF_LITTLE_ENDIAN; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ enum btf_endianness endian = BTF_BIG_ENDIAN; #else -#error "Unrecognized __BYTE_ORDER" +#error "Unrecognized __BYTE_ORDER__" #endif enum btf_endianness swap_endian = 1 - endian; struct btf *btf = NULL, *swap_btf = NULL; diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index a20a919244c0..a3bb6d399daa 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -124,7 +124,7 @@ static struct sysctl_test tests[] = { .descr = "ctx:write sysctl:write read ok narrow", .insns = { /* u64 w = (u16)write & 1; */ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, offsetof(struct bpf_sysctl, write)), #else @@ -184,7 +184,7 @@ static struct sysctl_test tests[] = { .descr = "ctx:file_pos sysctl:read read ok narrow", .insns = { /* If (file_pos == X) */ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, offsetof(struct bpf_sysctl, file_pos)), #else diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c 
b/tools/testing/selftests/bpf/verifier/ctx_skb.c index 9e1a30b94197..83cecfbd6739 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_skb.c +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c @@ -502,7 +502,7 @@ "check skb->hash byte load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)), #else @@ -537,7 +537,7 @@ "check skb->hash byte load permitted 3", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3), #else @@ -646,7 +646,7 @@ "check skb->hash half load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)), #else @@ -661,7 +661,7 @@ "check skb->hash half load permitted 2", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 2), #else @@ -676,7 +676,7 @@ "check skb->hash half load not permitted, unaligned 1", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 1), #else @@ -693,7 +693,7 @@ "check skb->hash half load not permitted, unaligned 3", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3), #else @@ -951,7 +951,7 @@ "check skb->data half load not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, data)), #else diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c index 2cab6a3966bb..5c8944d0b091 100644 --- a/tools/testing/selftests/bpf/verifier/lwt.c +++ b/tools/testing/selftests/bpf/verifier/lwt.c @@ -174,7 +174,7 @@ "check skb->tc_classid half load not permitted for lwt prog", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, tc_classid)), #else diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c index 471c1a5950d8..d8a9b1a1f9a2 100644 --- a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c +++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c @@ -2,7 +2,7 @@ "check bpf_perf_event_data->sample_period byte load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, sample_period)), #else @@ -18,7 +18,7 @@ "check bpf_perf_event_data->sample_period half load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, 
sample_period)), #else @@ -34,7 +34,7 @@ "check bpf_perf_event_data->sample_period word load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, sample_period)), #else -- cgit v1.2.3 From 3e7ed9cebb551ce9bfbf2985da9cdadd8186e1eb Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 26 Oct 2021 03:08:30 +0200 Subject: selftests/seccomp: Use __BYTE_ORDER__ Use the compiler-defined __BYTE_ORDER__ instead of the libc-defined __BYTE_ORDER for consistency. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211026010831.748682-6-iii@linux.ibm.com --- tools/testing/selftests/seccomp/seccomp_bpf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 1d64891e6492..d425688cf59c 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -276,12 +276,12 @@ int seccomp(unsigned int op, unsigned int flags, void *args) } #endif -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32)) #else -#error "wut? Unknown __BYTE_ORDER?!" +#error "wut? Unknown __BYTE_ORDER__?!" #endif #define SIBLING_EXIT_UNKILLED 0xbadbeef -- cgit v1.2.3 From 2e2c6d3fb38344df92b63162a28ec09491cc0700 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 26 Oct 2021 03:08:31 +0200 Subject: selftests/bpf: Fix test_core_reloc_mods on big-endian machines This is the same as commit d164dd9a5c08 ("selftests/bpf: Fix test_core_autosize on big-endian machines"), but for test_core_reloc_mods. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211026010831.748682-7-iii@linux.ibm.com --- tools/testing/selftests/bpf/progs/test_core_reloc_mods.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c index 8b533db4a7a5..b2ded497572a 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c @@ -42,7 +42,16 @@ struct core_reloc_mods { core_reloc_mods_substruct_t h; }; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src) +#else +#define CORE_READ(dst, src) ({ \ + int __sz = sizeof(*(dst)) < sizeof(*(src)) ? sizeof(*(dst)) : \ + sizeof(*(src)); \ + bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \ + (const char *)(src) + sizeof(*(src)) - __sz); \ +}) +#endif SEC("raw_tracepoint/sys_enter") int test_core_mods(void *ctx) -- cgit v1.2.3 From b4e87072762d063f9c20ba0ab2120e1910fa379f Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 25 Oct 2021 17:07:33 -0700 Subject: selftests/bpf: Skip all serial_test_get_branch_snapshot in vm Skipping the second half of the test is not enough to silent the warning in dmesg. Skip the whole test before we can either properly silent the warning in kernel, or fix LBR snapshot for VM. 
Fixes: 025bd7c753aa ("selftests/bpf: Add test for bpf_get_branch_snapshot") Fixes: aa67fdb46436 ("selftests/bpf: Skip the second half of get_branch_snapshot in vm") Signed-off-by: Song Liu Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211026000733.477714-1-songliubraving@fb.com --- .../selftests/bpf/prog_tests/get_branch_snapshot.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c index d6d70a359aeb..81402e443984 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -78,6 +78,12 @@ void serial_test_get_branch_snapshot(void) struct get_branch_snapshot *skel = NULL; int err; + /* Skip the test before we fix LBR snapshot for hypervisor. */ + if (is_hypervisor()) { + test__skip(); + return; + } + if (create_perf_events()) { test__skip(); /* system doesn't support LBR */ goto cleanup; @@ -107,16 +113,6 @@ void serial_test_get_branch_snapshot(void) goto cleanup; } - if (is_hypervisor()) { - /* As of today, LBR in hypervisor cannot be stopped before - * too many entries are flushed. Skip the hit/waste test - * for now in hypervisor until we optimize the LBR in - * hypervisor. - */ - test__skip(); - goto cleanup; - } - ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); /* Given we stop LBR in software, we will waste a few entries. -- cgit v1.2.3 From 20d1b54a52bd9a1d5d23a45dab75993ce2f1d617 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 22 Oct 2021 16:48:14 -0700 Subject: selftests/bpf: Guess function end for test_get_branch_snapshot Function in modules could appear in /proc/kallsyms in random order. ffffffffa02608a0 t bpf_testmod_loop_test ffffffffa02600c0 t __traceiter_bpf_testmod_test_writable_bare ffffffffa0263b60 d __tracepoint_bpf_testmod_test_write_bare ffffffffa02608c0 T bpf_testmod_test_read ffffffffa0260d08 t __SCT__tp_func_bpf_testmod_test_writable_bare ffffffffa0263300 d __SCK__tp_func_bpf_testmod_test_read ffffffffa0260680 T bpf_testmod_test_write ffffffffa0260860 t bpf_testmod_test_mod_kfunc Therefore, we cannot reliably use kallsyms_find_next() to find the end of a function. Replace it with a simple guess (start + 128). This is good enough for this test. Signed-off-by: Song Liu Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211022234814.318457-1-songliubraving@fb.com --- .../selftests/bpf/prog_tests/get_branch_snapshot.c | 7 +++-- tools/testing/selftests/bpf/trace_helpers.c | 36 ---------------------- tools/testing/selftests/bpf/trace_helpers.h | 5 --- 3 files changed, 4 insertions(+), 44 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c index 81402e443984..3948da12a528 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -97,9 +97,10 @@ void serial_test_get_branch_snapshot(void) if (!ASSERT_OK(err, "kallsyms_find")) goto cleanup; - err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high); - if (!ASSERT_OK(err, "kallsyms_find_next")) - goto cleanup; + /* Just a guess for the end of this function, as module functions + * in /proc/kallsyms could come in any order. 
+ */ + skel->bss->address_high = skel->bss->address_low + 128; err = get_branch_snapshot__attach(skel); if (!ASSERT_OK(err, "get_branch_snapshot__attach")) diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 5100a169b72b..7b7f918eda77 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -118,42 +118,6 @@ out: return err; } -/* find the address of the next symbol of the same type, this can be used - * to determine the end of a function. - */ -int kallsyms_find_next(const char *sym, unsigned long long *addr) -{ - char type, found_type, name[500]; - unsigned long long value; - bool found = false; - int err = 0; - FILE *f; - - f = fopen("/proc/kallsyms", "r"); - if (!f) - return -EINVAL; - - while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) { - /* Different types of symbols in kernel modules are mixed - * in /proc/kallsyms. Only return the next matching type. - * Use tolower() for type so that 'T' matches 't'. - */ - if (found && found_type == tolower(type)) { - *addr = value; - goto out; - } - if (strcmp(name, sym) == 0) { - found = true; - found_type = tolower(type); - } - } - err = -ENOENT; - -out: - fclose(f); - return err; -} - void read_trace_pipe(void) { int trace_fd; diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h index bc8ed86105d9..d907b445524d 100644 --- a/tools/testing/selftests/bpf/trace_helpers.h +++ b/tools/testing/selftests/bpf/trace_helpers.h @@ -16,11 +16,6 @@ long ksym_get_addr(const char *name); /* open kallsyms and find addresses on the fly, faster than load + search. */ int kallsyms_find(const char *sym, unsigned long long *addr); -/* find the address of the next symbol, this can be used to determine the - * end of a function - */ -int kallsyms_find_next(const char *sym, unsigned long long *addr); - void read_trace_pipe(void); ssize_t get_uprobe_offset(const void *addr, ssize_t base); -- cgit v1.2.3 From 547208a386fa2066fa2d6d48bda145f78c38604f Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Mon, 25 Oct 2021 15:33:42 -0700 Subject: selfetests/bpf: Update vmtest.sh defaults Increase memory to 4G, 8 SMP core with host cpu passthrough. This make it run faster in parallel mode and more likely to succeed. Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211025223345.2136168-2-fallentree@fb.com --- tools/testing/selftests/bpf/vmtest.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 8889b3f55236..027198768fad 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -224,10 +224,10 @@ EOF -nodefaults \ -display none \ -serial mon:stdio \ - -cpu kvm64 \ + -cpu host \ -enable-kvm \ - -smp 4 \ - -m 2G \ + -smp 8 \ + -m 4G \ -drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \ -kernel "${kernel_bzimage}" \ -append "root=/dev/vda rw console=ttyS0,115200" -- cgit v1.2.3 From 9e7240fb2d6e0ed3944724ae1d224fdf22b3dea3 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Mon, 25 Oct 2021 15:33:44 -0700 Subject: selftests/bpf: Fix attach_probe in parallel mode This patch makes attach_probe uses its own method as attach point, avoiding conflict with other tests like bpf_cookie. 
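The conflict pattern, roughly (illustrative, not lifted from the tree): two tests that both compute a uprobe offset for the same shared helper and attach to it can interfere with each other when test_progs runs them in parallel. Giving attach_probe a file-local function as its attach point keeps its uprobe/uretprobe private:

  /* sketch: a dedicated, static attach point owned by this test alone;
   * trigger_func is a placeholder name, the patch itself calls it method()
   */
  static void trigger_func(void)
  {
  }
  ...
  uprobe_offset = get_uprobe_offset(&trigger_func, base_addr);
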
Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211025223345.2136168-4-fallentree@fb.com --- tools/testing/selftests/bpf/prog_tests/attach_probe.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 6c511dcd1465..d0bd51eb23c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -5,6 +5,11 @@ /* this is how USDT semaphore is actually defined, except volatile modifier */ volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes"))); +/* attach point */ +static void method(void) { + return ; +} + void test_attach_probe(void) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); @@ -33,7 +38,7 @@ void test_attach_probe(void) if (CHECK(base_addr < 0, "get_base_addr", "failed to find base addr: %zd", base_addr)) return; - uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + uprobe_offset = get_uprobe_offset(&method, base_addr); ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) @@ -98,7 +103,7 @@ void test_attach_probe(void) goto cleanup; /* trigger & validate uprobe & uretprobe */ - get_base_addr(); + method(); if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res", "wrong uprobe res: %d\n", skel->bss->uprobe_res)) -- cgit v1.2.3 From e1ef62a4dd0e66ede4e73791ec1bcec947d4d0b3 Mon Sep 17 00:00:00 2001 From: Yucong Sun Date: Mon, 25 Oct 2021 15:33:45 -0700 Subject: selftests/bpf: Adding a namespace reset for tc_redirect This patch delete ns_src/ns_dst/ns_redir namespaces before recreating them, making the test more robust. Signed-off-by: Yucong Sun Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211025223345.2136168-5-fallentree@fb.com --- tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 53672634bc52..4b18b73df10b 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -176,6 +176,18 @@ static int netns_setup_namespaces(const char *verb) return 0; } +static void netns_setup_namespaces_nofail(const char *verb) +{ + const char * const *ns = namespaces; + char cmd[128]; + + while (*ns) { + snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns); + system(cmd); + ns++; + } +} + struct netns_setup_result { int ifindex_veth_src_fwd; int ifindex_veth_dst_fwd; @@ -762,6 +774,8 @@ fail: static void *test_tc_redirect_run_tests(void *arg) { + netns_setup_namespaces_nofail("delete"); + RUN_TEST(tc_redirect_peer); RUN_TEST(tc_redirect_peer_l3); RUN_TEST(tc_redirect_neigh); -- cgit v1.2.3 From ed9109ad643cfbe69670a37cdbaf2da9f409fed0 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 27 Oct 2021 16:45:02 -0700 Subject: selftests/bpf: Add bloom filter map test cases This patch adds test cases for bpf bloom filter maps. They include tests checking against invalid operations by userspace, tests for using the bloom filter map as an inner map, and a bpf program that queries the bloom filter map for values added by a userspace program. 
Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211027234504.30744-4-joannekoong@fb.com --- .../selftests/bpf/prog_tests/bloom_filter_map.c | 204 +++++++++++++++++++++ .../testing/selftests/bpf/progs/bloom_filter_map.c | 82 +++++++++ 2 files changed, 286 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c create mode 100644 tools/testing/selftests/bpf/progs/bloom_filter_map.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c new file mode 100644 index 000000000000..9aa3fbed918b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include "bloom_filter_map.skel.h" + +static void test_fail_cases(void) +{ + struct bpf_create_map_attr xattr = { + .name = "bloom_filter_map", + .map_type = BPF_MAP_TYPE_BLOOM_FILTER, + .max_entries = 100, + .value_size = 11, + }; + __u32 value; + int fd, err; + + /* Invalid key size */ + xattr.key_size = 4; + fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid key size")) + close(fd); + xattr.key_size = 0; + + /* Invalid value size */ + xattr.value_size = 0; + fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid value size 0")) + close(fd); + xattr.value_size = 11; + + /* Invalid max entries size */ + xattr.max_entries = 0; + fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid max entries size")) + close(fd); + xattr.max_entries = 100; + + /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ + xattr.map_flags = BPF_F_NO_PREALLOC; + fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid flags")) + close(fd); + xattr.map_flags = 0; + + fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter")) + return; + + /* Test invalid flags */ + err = bpf_map_update_elem(fd, NULL, &value, -1); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_EXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_F_LOCK); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_NOEXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, 10000); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + close(fd); +} + +static void check_bloom(struct bloom_filter_map *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.check_bloom); + if (!ASSERT_OK_PTR(link, "link")) + return; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); +} + +static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals, + __u32 nr_rand_vals) +{ + int outer_map_fd, inner_map_fd, err, i, key = 0; + struct bpf_create_map_attr xattr = { + .name = "bloom_filter_inner_map", + .map_type = BPF_MAP_TYPE_BLOOM_FILTER, + .value_size = sizeof(__u32), + .max_entries = nr_rand_vals, + }; + struct bpf_link *link; + + /* Create a bloom filter map that will be 
used as the inner map */ + inner_map_fd = bpf_create_map_xattr(&xattr); + if (!ASSERT_GE(inner_map_fd, 0, "bpf_create_map bloom filter inner map")) + return; + + for (i = 0; i < nr_rand_vals; i++) { + err = bpf_map_update_elem(inner_map_fd, NULL, rand_vals + i, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to inner_map_fd")) + goto done; + } + + /* Add the bloom filter map to the outer map */ + outer_map_fd = bpf_map__fd(skel->maps.outer_map); + err = bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY); + if (!ASSERT_OK(err, "Add bloom filter map to outer map")) + goto done; + + /* Attach the bloom_filter_inner_map prog */ + link = bpf_program__attach(skel->progs.inner_map); + if (!ASSERT_OK_PTR(link, "link")) + goto delete_inner_map; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); + +delete_inner_map: + /* Ensure the inner bloom filter map can be deleted */ + err = bpf_map_delete_elem(outer_map_fd, &key); + ASSERT_OK(err, "Delete inner bloom filter map"); + +done: + close(inner_map_fd); +} + +static int setup_progs(struct bloom_filter_map **out_skel, __u32 **out_rand_vals, + __u32 *out_nr_rand_vals) +{ + struct bloom_filter_map *skel; + int random_data_fd, bloom_fd; + __u32 *rand_vals = NULL; + __u32 map_size, val; + int err, i; + + /* Set up a bloom filter map skeleton */ + skel = bloom_filter_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bloom_filter_map__open_and_load")) + return -EINVAL; + + /* Set up rand_vals */ + map_size = bpf_map__max_entries(skel->maps.map_random_data); + rand_vals = malloc(sizeof(*rand_vals) * map_size); + if (!rand_vals) { + err = -ENOMEM; + goto error; + } + + /* Generate random values and populate both skeletons */ + random_data_fd = bpf_map__fd(skel->maps.map_random_data); + bloom_fd = bpf_map__fd(skel->maps.map_bloom); + for (i = 0; i < map_size; i++) { + val = rand(); + + err = bpf_map_update_elem(random_data_fd, &i, &val, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to map_random_data")) + goto error; + + err = bpf_map_update_elem(bloom_fd, NULL, &val, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to map_bloom")) + goto error; + + rand_vals[i] = val; + } + + *out_skel = skel; + *out_rand_vals = rand_vals; + *out_nr_rand_vals = map_size; + + return 0; + +error: + bloom_filter_map__destroy(skel); + if (rand_vals) + free(rand_vals); + return err; +} + +void test_bloom_filter_map(void) +{ + __u32 *rand_vals, nr_rand_vals; + struct bloom_filter_map *skel; + int err; + + test_fail_cases(); + + err = setup_progs(&skel, &rand_vals, &nr_rand_vals); + if (err) + return; + + test_inner_map(skel, rand_vals, nr_rand_vals); + free(rand_vals); + + check_bloom(skel); + + bloom_filter_map__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_map.c b/tools/testing/selftests/bpf/progs/bloom_filter_map.c new file mode 100644 index 000000000000..1316f3db79d9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bloom_filter_map.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct bpf_map; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1000); +} map_random_data SEC(".maps"); + +struct map_bloom_type { + __uint(type, BPF_MAP_TYPE_BLOOM_FILTER); + __type(value, __u32); + __uint(max_entries, 10000); + __uint(map_extra, 5); +} map_bloom SEC(".maps"); + +struct { + __uint(type, 
BPF_MAP_TYPE_ARRAY_OF_MAPS); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); + __array(values, struct map_bloom_type); +} outer_map SEC(".maps"); + +struct callback_ctx { + struct bpf_map *map; +}; + +int error = 0; + +static __u64 +check_elem(struct bpf_map *map, __u32 *key, __u32 *val, + struct callback_ctx *data) +{ + int err; + + err = bpf_map_peek_elem(data->map, val); + if (err) { + error |= 1; + return 1; /* stop the iteration */ + } + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int inner_map(void *ctx) +{ + struct bpf_map *inner_map; + struct callback_ctx data; + int key = 0; + + inner_map = bpf_map_lookup_elem(&outer_map, &key); + if (!inner_map) { + error |= 2; + return 0; + } + + data.map = inner_map; + bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int check_bloom(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&map_bloom; + bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0); + + return 0; +} -- cgit v1.2.3 From 57fd1c63c9a687c5fdc86fa628c490d6733e8d0b Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 27 Oct 2021 16:45:03 -0700 Subject: bpf/benchs: Add benchmark tests for bloom filter throughput + false positive This patch adds benchmark tests for the throughput (for lookups + updates) and the false positive rate of bloom filter lookups, as well as some minor refactoring of the bash script for running the benchmarks. These benchmarks show that as the number of hash functions increases, the throughput and the false positive rate of the bloom filter decreases. >From the benchmark data, the approximate average false-positive rates are roughly as follows: 1 hash function = ~30% 2 hash functions = ~15% 3 hash functions = ~5% 4 hash functions = ~2.5% 5 hash functions = ~1% 6 hash functions = ~0.5% 7 hash functions = ~0.35% 8 hash functions = ~0.15% 9 hash functions = ~0.1% 10 hash functions = ~0% For reference data, the benchmarks run on one thread on a machine with one numa node for 1 to 5 hash functions for 8-byte and 64-byte values are as follows: 1 hash function: 50k entries 8-byte value Lookups - 51.1 M/s operations Updates - 33.6 M/s operations False positive rate: 24.15% 64-byte value Lookups - 15.7 M/s operations Updates - 15.1 M/s operations False positive rate: 24.2% 100k entries 8-byte value Lookups - 51.0 M/s operations Updates - 33.4 M/s operations False positive rate: 24.04% 64-byte value Lookups - 15.6 M/s operations Updates - 14.6 M/s operations False positive rate: 24.06% 500k entries 8-byte value Lookups - 50.5 M/s operations Updates - 33.1 M/s operations False positive rate: 27.45% 64-byte value Lookups - 15.6 M/s operations Updates - 14.2 M/s operations False positive rate: 27.42% 1 mil entries 8-byte value Lookups - 49.7 M/s operations Updates - 32.9 M/s operations False positive rate: 27.45% 64-byte value Lookups - 15.4 M/s operations Updates - 13.7 M/s operations False positive rate: 27.58% 2.5 mil entries 8-byte value Lookups - 47.2 M/s operations Updates - 31.8 M/s operations False positive rate: 30.94% 64-byte value Lookups - 15.3 M/s operations Updates - 13.2 M/s operations False positive rate: 30.95% 5 mil entries 8-byte value Lookups - 41.1 M/s operations Updates - 28.1 M/s operations False positive rate: 31.01% 64-byte value Lookups - 13.3 M/s operations Updates - 11.4 M/s operations False positive rate: 30.98% 2 hash functions: 50k entries 8-byte value Lookups - 34.1 M/s operations Updates - 20.1 M/s operations False 
positive rate: 9.13% 64-byte value Lookups - 8.4 M/s operations Updates - 7.9 M/s operations False positive rate: 9.21% 100k entries 8-byte value Lookups - 33.7 M/s operations Updates - 18.9 M/s operations False positive rate: 9.13% 64-byte value Lookups - 8.4 M/s operations Updates - 7.7 M/s operations False positive rate: 9.19% 500k entries 8-byte value Lookups - 32.7 M/s operations Updates - 18.1 M/s operations False positive rate: 12.61% 64-byte value Lookups - 8.4 M/s operations Updates - 7.5 M/s operations False positive rate: 12.61% 1 mil entries 8-byte value Lookups - 30.6 M/s operations Updates - 18.9 M/s operations False positive rate: 12.54% 64-byte value Lookups - 8.0 M/s operations Updates - 7.0 M/s operations False positive rate: 12.52% 2.5 mil entries 8-byte value Lookups - 25.3 M/s operations Updates - 16.7 M/s operations False positive rate: 16.77% 64-byte value Lookups - 7.9 M/s operations Updates - 6.5 M/s operations False positive rate: 16.88% 5 mil entries 8-byte value Lookups - 20.8 M/s operations Updates - 14.7 M/s operations False positive rate: 16.78% 64-byte value Lookups - 7.0 M/s operations Updates - 6.0 M/s operations False positive rate: 16.78% 3 hash functions: 50k entries 8-byte value Lookups - 25.1 M/s operations Updates - 14.6 M/s operations False positive rate: 7.65% 64-byte value Lookups - 5.8 M/s operations Updates - 5.5 M/s operations False positive rate: 7.58% 100k entries 8-byte value Lookups - 24.7 M/s operations Updates - 14.1 M/s operations False positive rate: 7.71% 64-byte value Lookups - 5.8 M/s operations Updates - 5.3 M/s operations False positive rate: 7.62% 500k entries 8-byte value Lookups - 22.9 M/s operations Updates - 13.9 M/s operations False positive rate: 2.62% 64-byte value Lookups - 5.6 M/s operations Updates - 4.8 M/s operations False positive rate: 2.7% 1 mil entries 8-byte value Lookups - 19.8 M/s operations Updates - 12.6 M/s operations False positive rate: 2.60% 64-byte value Lookups - 5.3 M/s operations Updates - 4.4 M/s operations False positive rate: 2.69% 2.5 mil entries 8-byte value Lookups - 16.2 M/s operations Updates - 10.7 M/s operations False positive rate: 4.49% 64-byte value Lookups - 4.9 M/s operations Updates - 4.1 M/s operations False positive rate: 4.41% 5 mil entries 8-byte value Lookups - 18.8 M/s operations Updates - 9.2 M/s operations False positive rate: 4.45% 64-byte value Lookups - 5.2 M/s operations Updates - 3.9 M/s operations False positive rate: 4.54% 4 hash functions: 50k entries 8-byte value Lookups - 19.7 M/s operations Updates - 11.1 M/s operations False positive rate: 1.01% 64-byte value Lookups - 4.4 M/s operations Updates - 4.0 M/s operations False positive rate: 1.00% 100k entries 8-byte value Lookups - 19.5 M/s operations Updates - 10.9 M/s operations False positive rate: 1.00% 64-byte value Lookups - 4.3 M/s operations Updates - 3.9 M/s operations False positive rate: 0.97% 500k entries 8-byte value Lookups - 18.2 M/s operations Updates - 10.6 M/s operations False positive rate: 2.05% 64-byte value Lookups - 4.3 M/s operations Updates - 3.7 M/s operations False positive rate: 2.05% 1 mil entries 8-byte value Lookups - 15.5 M/s operations Updates - 9.6 M/s operations False positive rate: 1.99% 64-byte value Lookups - 4.0 M/s operations Updates - 3.4 M/s operations False positive rate: 1.99% 2.5 mil entries 8-byte value Lookups - 13.8 M/s operations Updates - 7.7 M/s operations False positive rate: 3.91% 64-byte value Lookups - 3.7 M/s operations Updates - 3.6 M/s operations False positive 
rate: 3.78% 5 mil entries 8-byte value Lookups - 13.0 M/s operations Updates - 6.9 M/s operations False positive rate: 3.93% 64-byte value Lookups - 3.5 M/s operations Updates - 3.7 M/s operations False positive rate: 3.39% 5 hash functions: 50k entries 8-byte value Lookups - 16.4 M/s operations Updates - 9.1 M/s operations False positive rate: 0.78% 64-byte value Lookups - 3.5 M/s operations Updates - 3.2 M/s operations False positive rate: 0.77% 100k entries 8-byte value Lookups - 16.3 M/s operations Updates - 9.0 M/s operations False positive rate: 0.79% 64-byte value Lookups - 3.5 M/s operations Updates - 3.2 M/s operations False positive rate: 0.78% 500k entries 8-byte value Lookups - 15.1 M/s operations Updates - 8.8 M/s operations False positive rate: 1.82% 64-byte value Lookups - 3.4 M/s operations Updates - 3.0 M/s operations False positive rate: 1.78% 1 mil entries 8-byte value Lookups - 13.2 M/s operations Updates - 7.8 M/s operations False positive rate: 1.81% 64-byte value Lookups - 3.2 M/s operations Updates - 2.8 M/s operations False positive rate: 1.80% 2.5 mil entries 8-byte value Lookups - 10.5 M/s operations Updates - 5.9 M/s operations False positive rate: 0.29% 64-byte value Lookups - 3.2 M/s operations Updates - 2.4 M/s operations False positive rate: 0.28% 5 mil entries 8-byte value Lookups - 9.6 M/s operations Updates - 5.7 M/s operations False positive rate: 0.30% 64-byte value Lookups - 3.2 M/s operations Updates - 2.7 M/s operations False positive rate: 0.30% Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211027234504.30744-5-joannekoong@fb.com --- tools/testing/selftests/bpf/Makefile | 6 +- tools/testing/selftests/bpf/bench.c | 37 ++ tools/testing/selftests/bpf/bench.h | 3 + .../selftests/bpf/benchs/bench_bloom_filter_map.c | 420 +++++++++++++++++++++ .../bpf/benchs/run_bench_bloom_filter_map.sh | 28 ++ .../selftests/bpf/benchs/run_bench_ringbufs.sh | 30 +- tools/testing/selftests/bpf/benchs/run_common.sh | 48 +++ .../selftests/bpf/progs/bloom_filter_bench.c | 153 ++++++++ 8 files changed, 695 insertions(+), 30 deletions(-) create mode 100644 tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c create mode 100755 tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh create mode 100644 tools/testing/selftests/bpf/benchs/run_common.sh create mode 100644 tools/testing/selftests/bpf/progs/bloom_filter_bench.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index ac47cf9760fc..2c464cb78d6b 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -524,18 +524,20 @@ $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) # Benchmark runner $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ) $(call msg,CC,,$@) - $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@ + $(Q)$(CC) $(CFLAGS) -O2 -c $(filter %.c,$^) $(LDLIBS) -o $@ $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \ $(OUTPUT)/perfbuf_bench.skel.h +$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ $(OUTPUT)/bench_count.o \ $(OUTPUT)/bench_rename.o \ $(OUTPUT)/bench_trigger.o \ - 
$(OUTPUT)/bench_ringbufs.o + $(OUTPUT)/bench_ringbufs.o \ + $(OUTPUT)/bench_bloom_filter_map.o $(call msg,BINARY,,$@) $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS) diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 6ea15b93a2f8..a1d5dffe5ef6 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -51,6 +51,35 @@ void setup_libbpf() fprintf(stderr, "failed to increase RLIMIT_MEMLOCK: %d", err); } +void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns) +{ + long total = res->false_hits + res->hits + res->drops; + + printf("Iter %3d (%7.3lfus): ", + iter, (delta_ns - 1000000000) / 1000.0); + + printf("%ld false hits of %ld total operations. Percentage = %2.2f %%\n", + res->false_hits, total, ((float)res->false_hits / total) * 100); +} + +void false_hits_report_final(struct bench_res res[], int res_cnt) +{ + long total_hits = 0, total_drops = 0, total_false_hits = 0, total_ops = 0; + int i; + + for (i = 0; i < res_cnt; i++) { + total_hits += res[i].hits; + total_false_hits += res[i].false_hits; + total_drops += res[i].drops; + } + total_ops = total_hits + total_false_hits + total_drops; + + printf("Summary: %ld false hits of %ld total operations. ", + total_false_hits, total_ops); + printf("Percentage = %2.2f %%\n", + ((float)total_false_hits / total_ops) * 100); +} + void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns) { double hits_per_sec, drops_per_sec; @@ -132,9 +161,11 @@ static const struct argp_option opts[] = { }; extern struct argp bench_ringbufs_argp; +extern struct argp bench_bloom_map_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, + { &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 }, {}, }; @@ -323,6 +354,9 @@ extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; extern const struct bench bench_pb_libbpf; extern const struct bench bench_pb_custom; +extern const struct bench bench_bloom_lookup; +extern const struct bench bench_bloom_update; +extern const struct bench bench_bloom_false_positive; static const struct bench *benchs[] = { &bench_count_global, @@ -344,6 +378,9 @@ static const struct bench *benchs[] = { &bench_rb_custom, &bench_pb_libbpf, &bench_pb_custom, + &bench_bloom_lookup, + &bench_bloom_update, + &bench_bloom_false_positive, }; static void setup_benchmark() diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index c1f48a473b02..624c6b11501f 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -33,6 +33,7 @@ struct env { struct bench_res { long hits; long drops; + long false_hits; }; struct bench { @@ -56,6 +57,8 @@ extern const struct bench *bench; void setup_libbpf(); void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns); void hits_drops_report_final(struct bench_res res[], int res_cnt); +void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns); +void false_hits_report_final(struct bench_res res[], int res_cnt); static inline __u64 get_time_ns() { struct timespec t; diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c new file mode 100644 index 000000000000..4bafad418a8a --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (c) 2021 Facebook */ + +#include +#include +#include +#include "bench.h" +#include "bloom_filter_bench.skel.h" +#include "bpf_util.h" + +static struct ctx { + bool use_array_map; + bool use_hashmap; + bool hashmap_use_bloom; + bool count_false_hits; + + struct bloom_filter_bench *skel; + + int bloom_fd; + int hashmap_fd; + int array_map_fd; + + pthread_mutex_t map_done_mtx; + pthread_cond_t map_done_cv; + bool map_done; + bool map_prepare_err; + + __u32 next_map_idx; +} ctx = { + .map_done_mtx = PTHREAD_MUTEX_INITIALIZER, + .map_done_cv = PTHREAD_COND_INITIALIZER, +}; + +struct stat { + __u32 stats[3]; +}; + +static struct { + __u32 nr_entries; + __u8 nr_hash_funcs; + __u8 value_size; +} args = { + .nr_entries = 1000, + .nr_hash_funcs = 3, + .value_size = 8, +}; + +enum { + ARG_NR_ENTRIES = 3000, + ARG_NR_HASH_FUNCS = 3001, + ARG_VALUE_SIZE = 3002, +}; + +static const struct argp_option opts[] = { + { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0, + "Set number of expected unique entries in the bloom filter"}, + { "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0, + "Set number of hash functions in the bloom filter"}, + { "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0, + "Set value size (in bytes) of bloom filter entries"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_NR_ENTRIES: + args.nr_entries = strtol(arg, NULL, 10); + if (args.nr_entries == 0) { + fprintf(stderr, "Invalid nr_entries count."); + argp_usage(state); + } + break; + case ARG_NR_HASH_FUNCS: + args.nr_hash_funcs = strtol(arg, NULL, 10); + if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) { + fprintf(stderr, + "The bloom filter must use 1 to 15 hash functions."); + argp_usage(state); + } + break; + case ARG_VALUE_SIZE: + args.value_size = strtol(arg, NULL, 10); + if (args.value_size < 2 || args.value_size > 256) { + fprintf(stderr, + "Invalid value size. 
Must be between 2 and 256 bytes"); + argp_usage(state); + } + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_bloom_map_argp = { + .options = opts, + .parser = parse_arg, +}; + +static void validate(void) +{ + if (env.consumer_cnt != 1) { + fprintf(stderr, + "The bloom filter benchmarks do not support multi-consumer use\n"); + exit(1); + } +} + +static inline void trigger_bpf_program(void) +{ + syscall(__NR_getpgid); +} + +static void *producer(void *input) +{ + while (true) + trigger_bpf_program(); + + return NULL; +} + +static void *map_prepare_thread(void *arg) +{ + __u32 val_size, i; + void *val = NULL; + int err; + + val_size = args.value_size; + val = malloc(val_size); + if (!val) { + ctx.map_prepare_err = true; + goto done; + } + + while (true) { + i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED); + if (i > args.nr_entries) + break; + +again: + /* Populate hashmap, bloom filter map, and array map with the same + * random values + */ + err = syscall(__NR_getrandom, val, val_size, 0); + if (err != val_size) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to get random value: %d\n", -errno); + break; + } + + if (ctx.use_hashmap) { + err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST); + if (err) { + if (err != -EEXIST) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to add elem to hashmap: %d\n", + -errno); + break; + } + goto again; + } + } + + i--; + + if (ctx.use_array_map) { + err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0); + if (err) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to add elem to array map: %d\n", -errno); + break; + } + } + + if (ctx.use_hashmap && !ctx.hashmap_use_bloom) + continue; + + err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0); + if (err) { + ctx.map_prepare_err = true; + fprintf(stderr, + "failed to add elem to bloom filter map: %d\n", -errno); + break; + } + } +done: + pthread_mutex_lock(&ctx.map_done_mtx); + ctx.map_done = true; + pthread_cond_signal(&ctx.map_done_cv); + pthread_mutex_unlock(&ctx.map_done_mtx); + + if (val) + free(val); + + return NULL; +} + +static void populate_maps(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + pthread_t map_thread; + int i, err, nr_rand_bytes; + + ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map); + ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap); + ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map); + + for (i = 0; i < nr_cpus; i++) { + err = pthread_create(&map_thread, NULL, map_prepare_thread, + NULL); + if (err) { + fprintf(stderr, "failed to create pthread: %d\n", -errno); + exit(1); + } + } + + pthread_mutex_lock(&ctx.map_done_mtx); + while (!ctx.map_done) + pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx); + pthread_mutex_unlock(&ctx.map_done_mtx); + + if (ctx.map_prepare_err) + exit(1); + + nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals, + ctx.skel->rodata->nr_rand_bytes, 0); + if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) { + fprintf(stderr, "failed to get random bytes\n"); + exit(1); + } +} + +static void check_args(void) +{ + if (args.value_size < 8) { + __u64 nr_unique_entries = 1ULL << (args.value_size * 8); + + if (args.nr_entries > nr_unique_entries) { + fprintf(stderr, + "Not enough unique values for the nr_entries requested\n"); + exit(1); + } + } +} + +static struct bloom_filter_bench *setup_skeleton(void) +{ + struct bloom_filter_bench *skel; + + check_args(); + + setup_libbpf(); + + 
skel = bloom_filter_bench__open(); + if (!skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom; + skel->rodata->count_false_hits = ctx.count_false_hits; + + /* Resize number of entries */ + bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries); + + bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries); + + bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries); + + /* Set value size */ + bpf_map__set_value_size(skel->maps.array_map, args.value_size); + + bpf_map__set_value_size(skel->maps.bloom_map, args.value_size); + + bpf_map__set_value_size(skel->maps.hashmap, args.value_size); + + /* For the hashmap, we use the value as the key as well */ + bpf_map__set_key_size(skel->maps.hashmap, args.value_size); + + skel->bss->value_size = args.value_size; + + /* Set number of hash functions */ + bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs); + + if (bloom_filter_bench__load(skel)) { + fprintf(stderr, "failed to load skeleton\n"); + exit(1); + } + + return skel; +} + +static void bloom_lookup_setup(void) +{ + struct bpf_link *link; + + ctx.use_array_map = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void bloom_update_setup(void) +{ + struct bpf_link *link; + + ctx.use_array_map = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_update); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void false_positive_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + ctx.hashmap_use_bloom = true; + ctx.count_false_hits = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void measure(struct bench_res *res) +{ + unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0; + static unsigned long last_hits, last_drops, last_false_hits; + unsigned int nr_cpus = bpf_num_possible_cpus(); + int hit_key, drop_key, false_hit_key; + int i; + + hit_key = ctx.skel->rodata->hit_key; + drop_key = ctx.skel->rodata->drop_key; + false_hit_key = ctx.skel->rodata->false_hit_key; + + if (ctx.skel->bss->error != 0) { + fprintf(stderr, "error (%d) when searching the bloom filter\n", + ctx.skel->bss->error); + exit(1); + } + + for (i = 0; i < nr_cpus; i++) { + struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i]; + + total_hits += s->stats[hit_key]; + total_drops += s->stats[drop_key]; + total_false_hits += s->stats[false_hit_key]; + } + + res->hits = total_hits - last_hits; + res->drops = total_drops - last_drops; + res->false_hits = total_false_hits - last_false_hits; + + last_hits = total_hits; + last_drops = total_drops; + last_false_hits = total_false_hits; +} + +static void *consumer(void *input) +{ + return NULL; +} + +const struct bench bench_bloom_lookup = { + .name = "bloom-lookup", + .validate = validate, + .setup = bloom_lookup_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_bloom_update = { + .name = "bloom-update", + .validate = validate, + .setup = 
bloom_update_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_bloom_false_positive = { + .name = "bloom-false-positive", + .validate = validate, + .setup = false_positive_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = false_hits_report_progress, + .report_final = false_hits_report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh new file mode 100755 index 000000000000..d03d0e5c91cd --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source ./benchs/run_common.sh + +set -eufo pipefail + +header "Bloom filter map" +for v in 2 4 8 16 40; do +for t in 1 4 8 12 16; do +for h in {1..10}; do +subtitle "value_size: $v bytes, # threads: $t, # hashes: $h" + for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do + printf "%'d entries -\n" $e + printf "\t" + summarize "Lookups, total operations: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-lookup)" + printf "\t" + summarize "Updates, total operations: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-update)" + printf "\t" + summarize_percentage "False positive rate: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-false-positive)" + done + printf "\n" +done +done +done diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh index af4aa04caba6..ada028aa9007 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh @@ -1,34 +1,8 @@ #!/bin/bash -set -eufo pipefail - -RUN_BENCH="sudo ./bench -w3 -d10 -a" - -function hits() -{ - echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" -} - -function drops() -{ - echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" -} +source ./benchs/run_common.sh -function header() -{ - local len=${#1} - - printf "\n%s\n" "$1" - for i in $(seq 1 $len); do printf '='; done - printf '\n' -} - -function summarize() -{ - bench="$1" - summary=$(echo $2 | tail -n1) - printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)" -} +set -eufo pipefail header "Single-producer, parallel producer" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh new file mode 100644 index 000000000000..670f23b037c4 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_common.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +RUN_BENCH="sudo ./bench -w3 -d10 -a" + +function header() +{ + local len=${#1} + + printf "\n%s\n" "$1" + for i in $(seq 1 $len); do printf '='; done + printf '\n' +} + +function subtitle() +{ + local len=${#1} + printf "\t%s\n" "$1" +} + +function hits() +{ + echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + +function drops() +{ + echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + +function percentage() +{ + echo "$*" | sed -E 
"s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/" +} + +function summarize() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)" +} + +function summarize_percentage() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s%%\n" "$bench" "$(percentage $summary)" +} diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_bench.c b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c new file mode 100644 index 000000000000..d9a88dd1ea65 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct bpf_map; + +__u8 rand_vals[2500000]; +const __u32 nr_rand_bytes = 2500000; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + /* max entries and value_size will be set programmatically. + * They are configurable from the userspace bench program. + */ +} array_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_BLOOM_FILTER); + /* max entries, value_size, and # of hash functions will be set + * programmatically. They are configurable from the userspace + * bench program. + */ + __uint(map_extra, 3); +} bloom_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + /* max entries, key_size, and value_size, will be set + * programmatically. They are configurable from the userspace + * bench program. + */ +} hashmap SEC(".maps"); + +struct callback_ctx { + struct bpf_map *map; + bool update; +}; + +/* Tracks the number of hits, drops, and false hits */ +struct { + __u32 stats[3]; +} __attribute__((__aligned__(256))) percpu_stats[256]; + +const __u32 hit_key = 0; +const __u32 drop_key = 1; +const __u32 false_hit_key = 2; + +__u8 value_size; + +const volatile bool hashmap_use_bloom; +const volatile bool count_false_hits; + +int error = 0; + +static __always_inline void log_result(__u32 key) +{ + __u32 cpu = bpf_get_smp_processor_id(); + + percpu_stats[cpu & 255].stats[key]++; +} + +static __u64 +bloom_callback(struct bpf_map *map, __u32 *key, void *val, + struct callback_ctx *data) +{ + int err; + + if (data->update) + err = bpf_map_push_elem(data->map, val, 0); + else + err = bpf_map_peek_elem(data->map, val); + + if (err) { + error |= 1; + return 1; /* stop the iteration */ + } + + log_result(hit_key); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_lookup(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&bloom_map; + data.update = false; + + bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_update(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&bloom_map; + data.update = true; + + bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_hashmap_lookup(void *ctx) +{ + __u64 *result; + int i, err; + + __u32 index = bpf_get_prandom_u32(); + __u32 bitmask = (1ULL << 21) - 1; + + for (i = 0; i < 1024; i++, index += value_size) { + index = index & bitmask; + + if (hashmap_use_bloom) { + err = bpf_map_peek_elem(&bloom_map, + rand_vals + index); + if (err) { + if (err != -ENOENT) { + error |= 2; + return 0; + } + log_result(hit_key); + continue; + } + } + + result = bpf_map_lookup_elem(&hashmap, + rand_vals + index); + if (result) { + log_result(hit_key); + } else { + if 
(hashmap_use_bloom && count_false_hits) + log_result(false_hit_key); + log_result(drop_key); + } + } + + return 0; +} -- cgit v1.2.3 From f44bc543a079c2ebc534cbfabd6fbfcfc2b09f72 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 27 Oct 2021 16:45:04 -0700 Subject: bpf/benchs: Add benchmarks for comparing hashmap lookups w/ vs. w/out bloom filter This patch adds benchmark tests for comparing the performance of hashmap lookups without the bloom filter vs. hashmap lookups with the bloom filter. Checking the bloom filter first for whether the element exists should overall enable a higher throughput for hashmap lookups, since if the element does not exist in the bloom filter, we can avoid a costly lookup in the hashmap. On average, using 5 hash functions in the bloom filter tended to perform the best across the widest range of different entry sizes. The benchmark results using 5 hash functions (running on 8 threads on a machine with one numa node, and taking the average of 3 runs) were roughly as follows:

value_size = 4 bytes -
	10k entries: 30% faster
	50k entries: 40% faster
	100k entries: 40% faster
	500k entries: 70% faster
	1 million entries: 90% faster
	5 million entries: 140% faster

value_size = 8 bytes -
	10k entries: 30% faster
	50k entries: 40% faster
	100k entries: 50% faster
	500k entries: 80% faster
	1 million entries: 100% faster
	5 million entries: 150% faster

value_size = 16 bytes -
	10k entries: 20% faster
	50k entries: 30% faster
	100k entries: 35% faster
	500k entries: 65% faster
	1 million entries: 85% faster
	5 million entries: 110% faster

value_size = 40 bytes -
	10k entries: 5% faster
	50k entries: 15% faster
	100k entries: 20% faster
	500k entries: 65% faster
	1 million entries: 75% faster
	5 million entries: 120% faster

Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211027234504.30744-6-joannekoong@fb.com --- tools/testing/selftests/bpf/bench.c | 23 +++++++-- .../selftests/bpf/benchs/bench_bloom_filter_map.c | 57 ++++++++++++++++++++++ .../bpf/benchs/run_bench_bloom_filter_map.sh | 17 +++++++ tools/testing/selftests/bpf/benchs/run_common.sh | 12 +++++ 4 files changed, 104 insertions(+), 5 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index a1d5dffe5ef6..cc4722f693e9 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -92,20 +92,22 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns) printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0); - printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s\n", - hits_per_sec, hits_per_prod, drops_per_sec); + printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s, total operations %8.3lfM/s\n", + hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec); } void hits_drops_report_final(struct bench_res res[], int res_cnt) { int i; - double hits_mean = 0.0, drops_mean = 0.0; - double hits_stddev = 0.0, drops_stddev = 0.0; + double hits_mean = 0.0, drops_mean = 0.0, total_ops_mean = 0.0; + double hits_stddev = 0.0, drops_stddev = 0.0, total_ops_stddev = 0.0; + double total_ops; for (i = 0; i < res_cnt; i++) { hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); drops_mean += res[i].drops / 1000000.0 / (0.0 + res_cnt); } + total_ops_mean = hits_mean + drops_mean; if (res_cnt > 1) { for (i = 0; i < res_cnt; i++) { @@ -115,14 +117,21 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt) drops_stddev +=
(drops_mean - res[i].drops / 1000000.0) * (drops_mean - res[i].drops / 1000000.0) / (res_cnt - 1.0); + total_ops = res[i].hits + res[i].drops; + total_ops_stddev += (total_ops_mean - total_ops / 1000000.0) * + (total_ops_mean - total_ops / 1000000.0) / + (res_cnt - 1.0); } hits_stddev = sqrt(hits_stddev); drops_stddev = sqrt(drops_stddev); + total_ops_stddev = sqrt(total_ops_stddev); } printf("Summary: hits %8.3lf \u00B1 %5.3lfM/s (%7.3lfM/prod), ", hits_mean, hits_stddev, hits_mean / env.producer_cnt); - printf("drops %8.3lf \u00B1 %5.3lfM/s\n", + printf("drops %8.3lf \u00B1 %5.3lfM/s, ", drops_mean, drops_stddev); + printf("total operations %8.3lf \u00B1 %5.3lfM/s\n", + total_ops_mean, total_ops_stddev); } const char *argp_program_version = "benchmark"; @@ -357,6 +366,8 @@ extern const struct bench bench_pb_custom; extern const struct bench bench_bloom_lookup; extern const struct bench bench_bloom_update; extern const struct bench bench_bloom_false_positive; +extern const struct bench bench_hashmap_without_bloom; +extern const struct bench bench_hashmap_with_bloom; static const struct bench *benchs[] = { &bench_count_global, @@ -381,6 +392,8 @@ static const struct bench *benchs[] = { &bench_bloom_lookup, &bench_bloom_update, &bench_bloom_false_positive, + &bench_hashmap_without_bloom, + &bench_hashmap_with_bloom, }; static void setup_benchmark() diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c index 4bafad418a8a..6eeeed2913e6 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -346,6 +346,41 @@ static void false_positive_setup(void) } } +static void hashmap_with_bloom_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + ctx.hashmap_use_bloom = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void hashmap_no_bloom_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + static void measure(struct bench_res *res) { unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0; @@ -418,3 +453,25 @@ const struct bench bench_bloom_false_positive = { .report_progress = false_hits_report_progress, .report_final = false_hits_report_final, }; + +const struct bench bench_hashmap_without_bloom = { + .name = "hashmap-without-bloom", + .validate = validate, + .setup = hashmap_no_bloom_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_hashmap_with_bloom = { + .name = "hashmap-with-bloom", + .validate = validate, + .setup = hashmap_with_bloom_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh index d03d0e5c91cd..8ffd385ab2f4 100755 --- 
a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh @@ -26,3 +26,20 @@ subtitle "value_size: $v bytes, # threads: $t, # hashes: $h" done done done + +header "Hashmap without bloom filter vs. hashmap with bloom filter (throughput, 8 threads)" +for v in 2 4 8 16 40; do +for h in {1..10}; do +subtitle "value_size: $v, # hashes: $h" + for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do + printf "%'d entries -\n" $e + printf "\t" + summarize_total "Hashmap without bloom filter: " \ + "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-without-bloom)" + printf "\t" + summarize_total "Hashmap with bloom filter: " \ + "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-with-bloom)" + done + printf "\n" +done +done diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh index 670f23b037c4..9a16be78b180 100644 --- a/tools/testing/selftests/bpf/benchs/run_common.sh +++ b/tools/testing/selftests/bpf/benchs/run_common.sh @@ -33,6 +33,11 @@ function percentage() echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/" } +function total() +{ + echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + function summarize() { bench="$1" @@ -46,3 +51,10 @@ function summarize_percentage() summary=$(echo $2 | tail -n1) printf "%-20s %s%%\n" "$bench" "$(percentage $summary)" } + +function summarize_total() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s\n" "$bench" "$(total $summary)" +} -- cgit v1.2.3 From 087cba799ced0573df499ddd3b2d8777e50cfb62 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Thu, 28 Oct 2021 12:04:59 +0530 Subject: selftests/bpf: Add weak/typeless ksym test for light skeleton Also, avoid using CO-RE features, as lskel doesn't support CO-RE, yet. Include both light and libbpf skeleton in same file to test both of them together. In c48e51c8b07a ("bpf: selftests: Add selftests for module kfunc support"), I added support for generating both lskel and libbpf skel for a BPF object, however the name parameter for bpftool caused collisions when included in same file together. This meant that every test needed a separate file for a libbpf/light skeleton separation instead of subtests. Change that by appending a "_lskel" suffix to the name for files using light skeleton, and convert all existing users. 
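
As a rough sketch of the result (based on the ksyms_weak conversion in this series; the helper name is illustrative and error handling is trimmed), the suffix lets a single prog_tests file include and drive both generated skeletons without name clashes:

  #include "test_ksyms_weak.skel.h"	/* libbpf skeleton: test_ksyms_weak__* */
  #include "test_ksyms_weak.lskel.h"	/* light skeleton: test_ksyms_weak_lskel__* */

  static void smoke_test_both_skeletons(void)
  {
  	struct test_ksyms_weak *skel = test_ksyms_weak__open_and_load();
  	struct test_ksyms_weak_lskel *lskel = test_ksyms_weak_lskel__open_and_load();

  	if (skel)
  		test_ksyms_weak__destroy(skel);
  	if (lskel)
  		test_ksyms_weak_lskel__destroy(lskel);
  }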
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211028063501.2239335-7-memxor@gmail.com --- tools/testing/selftests/bpf/Makefile | 4 +-- tools/testing/selftests/bpf/prog_tests/atomics.c | 34 +++++++++--------- .../selftests/bpf/prog_tests/fentry_fexit.c | 16 ++++----- .../testing/selftests/bpf/prog_tests/fentry_test.c | 14 ++++---- .../testing/selftests/bpf/prog_tests/fexit_sleep.c | 12 +++---- .../testing/selftests/bpf/prog_tests/fexit_test.c | 14 ++++---- .../testing/selftests/bpf/prog_tests/kfunc_call.c | 6 ++-- tools/testing/selftests/bpf/prog_tests/ksyms_btf.c | 35 +++++++++++++++++-- .../selftests/bpf/prog_tests/ksyms_module.c | 40 +++++++++++++++++++--- .../selftests/bpf/prog_tests/ksyms_module_libbpf.c | 28 --------------- tools/testing/selftests/bpf/prog_tests/ringbuf.c | 12 +++---- .../selftests/bpf/prog_tests/trace_printk.c | 14 ++++---- .../selftests/bpf/prog_tests/trace_vprintk.c | 12 +++---- .../testing/selftests/bpf/prog_tests/verif_stats.c | 6 ++-- .../testing/selftests/bpf/progs/test_ksyms_weak.c | 2 +- 15 files changed, 142 insertions(+), 107 deletions(-) delete mode 100644 tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 2c464cb78d6b..f49cb5fc85af 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -325,7 +325,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c # Generate both light skeleton and libbpf skeleton for these -LSKELS_EXTRA := test_ksyms_module.c +LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c SKEL_BLACKLIST += $$(LSKELS) test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o @@ -404,7 +404,7 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o) $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o) $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o) - $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@ + $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o)) diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index 1486be5d3209..0f9525293881 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -4,13 +4,13 @@ #include "atomics.lskel.h" -static void test_add(struct atomics *skel) +static void test_add(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__add__attach(skel); + link_fd = atomics_lskel__add__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(add)")) return; @@ -36,13 +36,13 @@ cleanup: close(link_fd); } -static void test_sub(struct atomics *skel) +static void test_sub(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__sub__attach(skel); + link_fd = atomics_lskel__sub__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(sub)")) return; @@ -69,13 +69,13 @@ cleanup: close(link_fd); } -static 
void test_and(struct atomics *skel) +static void test_and(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__and__attach(skel); + link_fd = atomics_lskel__and__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(and)")) return; @@ -97,13 +97,13 @@ cleanup: close(link_fd); } -static void test_or(struct atomics *skel) +static void test_or(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__or__attach(skel); + link_fd = atomics_lskel__or__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(or)")) return; @@ -126,13 +126,13 @@ cleanup: close(link_fd); } -static void test_xor(struct atomics *skel) +static void test_xor(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__xor__attach(skel); + link_fd = atomics_lskel__xor__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(xor)")) return; @@ -154,13 +154,13 @@ cleanup: close(link_fd); } -static void test_cmpxchg(struct atomics *skel) +static void test_cmpxchg(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__cmpxchg__attach(skel); + link_fd = atomics_lskel__cmpxchg__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)")) return; @@ -183,13 +183,13 @@ cleanup: close(link_fd); } -static void test_xchg(struct atomics *skel) +static void test_xchg(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__xchg__attach(skel); + link_fd = atomics_lskel__xchg__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(xchg)")) return; @@ -212,10 +212,10 @@ cleanup: void test_atomics(void) { - struct atomics *skel; + struct atomics_lskel *skel; __u32 duration = 0; - skel = atomics__open_and_load(); + skel = atomics_lskel__open_and_load(); if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) return; @@ -243,5 +243,5 @@ void test_atomics(void) test_xchg(skel); cleanup: - atomics__destroy(skel); + atomics_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c index 91154c2ba256..4374ac8a8a91 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -6,23 +6,23 @@ void test_fentry_fexit(void) { - struct fentry_test *fentry_skel = NULL; - struct fexit_test *fexit_skel = NULL; + struct fentry_test_lskel *fentry_skel = NULL; + struct fexit_test_lskel *fexit_skel = NULL; __u64 *fentry_res, *fexit_res; __u32 duration = 0, retval; int err, prog_fd, i; - fentry_skel = fentry_test__open_and_load(); + fentry_skel = fentry_test_lskel__open_and_load(); if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) goto close_prog; - fexit_skel = fexit_test__open_and_load(); + fexit_skel = fexit_test_lskel__open_and_load(); if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) goto close_prog; - err = fentry_test__attach(fentry_skel); + err = fentry_test_lskel__attach(fentry_skel); if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) goto close_prog; - err = fexit_test__attach(fexit_skel); + err = fexit_test_lskel__attach(fexit_skel); if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto close_prog; @@ -44,6 +44,6 @@ void test_fentry_fexit(void) } close_prog: - fentry_test__destroy(fentry_skel); - fexit_test__destroy(fexit_skel); + fentry_test_lskel__destroy(fentry_skel); + 
fexit_test_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 174c89e7456e..12921b3850d2 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -3,19 +3,19 @@ #include #include "fentry_test.lskel.h" -static int fentry_test(struct fentry_test *fentry_skel) +static int fentry_test(struct fentry_test_lskel *fentry_skel) { int err, prog_fd, i; __u32 duration = 0, retval; int link_fd; __u64 *result; - err = fentry_test__attach(fentry_skel); + err = fentry_test_lskel__attach(fentry_skel); if (!ASSERT_OK(err, "fentry_attach")) return err; /* Check that already linked program can't be attached again. */ - link_fd = fentry_test__test1__attach(fentry_skel); + link_fd = fentry_test_lskel__test1__attach(fentry_skel); if (!ASSERT_LT(link_fd, 0, "fentry_attach_link")) return -1; @@ -31,7 +31,7 @@ static int fentry_test(struct fentry_test *fentry_skel) return -1; } - fentry_test__detach(fentry_skel); + fentry_test_lskel__detach(fentry_skel); /* zero results for re-attach test */ memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss)); @@ -40,10 +40,10 @@ static int fentry_test(struct fentry_test *fentry_skel) void test_fentry_test(void) { - struct fentry_test *fentry_skel = NULL; + struct fentry_test_lskel *fentry_skel = NULL; int err; - fentry_skel = fentry_test__open_and_load(); + fentry_skel = fentry_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) goto cleanup; @@ -55,5 +55,5 @@ void test_fentry_test(void) ASSERT_OK(err, "fentry_second_attach"); cleanup: - fentry_test__destroy(fentry_skel); + fentry_test_lskel__destroy(fentry_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c index 4e7f4b42ea29..f949647dbbc2 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c @@ -10,7 +10,7 @@ static int do_sleep(void *skel) { - struct fexit_sleep *fexit_skel = skel; + struct fexit_sleep_lskel *fexit_skel = skel; struct timespec ts1 = { .tv_nsec = 1 }; struct timespec ts2 = { .tv_sec = 10 }; @@ -25,16 +25,16 @@ static char child_stack[STACK_SIZE]; void test_fexit_sleep(void) { - struct fexit_sleep *fexit_skel = NULL; + struct fexit_sleep_lskel *fexit_skel = NULL; int wstatus, duration = 0; pid_t cpid; int err, fexit_cnt; - fexit_skel = fexit_sleep__open_and_load(); + fexit_skel = fexit_sleep_lskel__open_and_load(); if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) goto cleanup; - err = fexit_sleep__attach(fexit_skel); + err = fexit_sleep_lskel__attach(fexit_skel); if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto cleanup; @@ -60,7 +60,7 @@ void test_fexit_sleep(void) */ close(fexit_skel->progs.nanosleep_fentry.prog_fd); close(fexit_skel->progs.nanosleep_fexit.prog_fd); - fexit_sleep__detach(fexit_skel); + fexit_sleep_lskel__detach(fexit_skel); /* kill the thread to unwind sys_nanosleep stack through the trampoline */ kill(cpid, 9); @@ -78,5 +78,5 @@ void test_fexit_sleep(void) goto cleanup; cleanup: - fexit_sleep__destroy(fexit_skel); + fexit_sleep_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index af3dba726701..d4887d8bb396 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ 
b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -3,19 +3,19 @@ #include #include "fexit_test.lskel.h" -static int fexit_test(struct fexit_test *fexit_skel) +static int fexit_test(struct fexit_test_lskel *fexit_skel) { int err, prog_fd, i; __u32 duration = 0, retval; int link_fd; __u64 *result; - err = fexit_test__attach(fexit_skel); + err = fexit_test_lskel__attach(fexit_skel); if (!ASSERT_OK(err, "fexit_attach")) return err; /* Check that already linked program can't be attached again. */ - link_fd = fexit_test__test1__attach(fexit_skel); + link_fd = fexit_test_lskel__test1__attach(fexit_skel); if (!ASSERT_LT(link_fd, 0, "fexit_attach_link")) return -1; @@ -31,7 +31,7 @@ static int fexit_test(struct fexit_test *fexit_skel) return -1; } - fexit_test__detach(fexit_skel); + fexit_test_lskel__detach(fexit_skel); /* zero results for re-attach test */ memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss)); @@ -40,10 +40,10 @@ static int fexit_test(struct fexit_test *fexit_skel) void test_fexit_test(void) { - struct fexit_test *fexit_skel = NULL; + struct fexit_test_lskel *fexit_skel = NULL; int err; - fexit_skel = fexit_test__open_and_load(); + fexit_skel = fexit_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) goto cleanup; @@ -55,5 +55,5 @@ void test_fexit_test(void) ASSERT_OK(err, "fexit_second_attach"); cleanup: - fexit_test__destroy(fexit_skel); + fexit_test_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 9611f2bc50df..5c9c0176991b 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -7,10 +7,10 @@ static void test_main(void) { - struct kfunc_call_test *skel; + struct kfunc_call_test_lskel *skel; int prog_fd, retval, err; - skel = kfunc_call_test__open_and_load(); + skel = kfunc_call_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return; @@ -26,7 +26,7 @@ static void test_main(void) ASSERT_OK(err, "bpf_prog_test_run(test2)"); ASSERT_EQ(retval, 3, "test2-retval"); - kfunc_call_test__destroy(skel); + kfunc_call_test_lskel__destroy(skel); } static void test_subprog(void) diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c index cf3acfa5a91d..79f6bd1e50d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -7,6 +7,7 @@ #include "test_ksyms_btf.skel.h" #include "test_ksyms_btf_null_check.skel.h" #include "test_ksyms_weak.skel.h" +#include "test_ksyms_weak.lskel.h" static int duration; @@ -89,11 +90,11 @@ static void test_weak_syms(void) int err; skel = test_ksyms_weak__open_and_load(); - if (CHECK(!skel, "test_ksyms_weak__open_and_load", "failed\n")) + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load")) return; err = test_ksyms_weak__attach(skel); - if (CHECK(err, "test_ksyms_weak__attach", "skeleton attach failed: %d\n", err)) + if (!ASSERT_OK(err, "test_ksyms_weak__attach")) goto cleanup; /* trigger tracepoint */ @@ -109,6 +110,33 @@ cleanup: test_ksyms_weak__destroy(skel); } +static void test_weak_syms_lskel(void) +{ + struct test_ksyms_weak_lskel *skel; + struct test_ksyms_weak_lskel__data *data; + int err; + + skel = test_ksyms_weak_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load")) + return; + + err = test_ksyms_weak_lskel__attach(skel); + if (!ASSERT_OK(err, 
"test_ksyms_weak_lskel__attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym"); + ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym"); + ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym"); + ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym"); + +cleanup: + test_ksyms_weak_lskel__destroy(skel); +} + void test_ksyms_btf(void) { int percpu_datasec; @@ -136,4 +164,7 @@ void test_ksyms_btf(void) if (test__start_subtest("weak_ksyms")) test_weak_syms(); + + if (test__start_subtest("weak_ksyms_lskel")) + test_weak_syms_lskel(); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c index 831447878d7b..d490ad80eccb 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -4,10 +4,11 @@ #include #include #include "test_ksyms_module.lskel.h" +#include "test_ksyms_module.skel.h" -void test_ksyms_module(void) +void test_ksyms_module_lskel(void) { - struct test_ksyms_module *skel; + struct test_ksyms_module_lskel *skel; int retval; int err; @@ -16,8 +17,8 @@ void test_ksyms_module(void) return; } - skel = test_ksyms_module__open_and_load(); - if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open_and_load")) + skel = test_ksyms_module_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load")) return; err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); @@ -25,6 +26,37 @@ void test_ksyms_module(void) goto cleanup; ASSERT_EQ(retval, 0, "retval"); ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); +cleanup: + test_ksyms_module_lskel__destroy(skel); +} + +void test_ksyms_module_libbpf(void) +{ + struct test_ksyms_module *skel; + int retval, err; + + if (!env.has_testmod) { + test__skip(); + return; + } + + skel = test_ksyms_module__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) + return; + err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, + sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); cleanup: test_ksyms_module__destroy(skel); } + +void test_ksyms_module(void) +{ + if (test__start_subtest("lskel")) + test_ksyms_module_lskel(); + if (test__start_subtest("libbpf")) + test_ksyms_module_libbpf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c deleted file mode 100644 index e6343ef63af9..000000000000 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include "test_ksyms_module.skel.h" - -void test_ksyms_module_libbpf(void) -{ - struct test_ksyms_module *skel; - int retval, err; - - if (!env.has_testmod) { - test__skip(); - return; - } - - skel = test_ksyms_module__open_and_load(); - if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) - return; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, - sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); - if (!ASSERT_OK(err, "bpf_prog_test_run")) - goto cleanup; - ASSERT_EQ(retval, 0, "retval"); - 
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); -cleanup: - test_ksyms_module__destroy(skel); -} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index 4706cee84360..9a80fe8a6427 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -58,7 +58,7 @@ static int process_sample(void *ctx, void *data, size_t len) } } -static struct test_ringbuf *skel; +static struct test_ringbuf_lskel *skel; static struct ring_buffer *ringbuf; static void trigger_samples() @@ -90,13 +90,13 @@ void test_ringbuf(void) int page_size = getpagesize(); void *mmap_ptr, *tmp_ptr; - skel = test_ringbuf__open(); + skel = test_ringbuf_lskel__open(); if (CHECK(!skel, "skel_open", "skeleton open failed\n")) return; skel->maps.ringbuf.max_entries = page_size; - err = test_ringbuf__load(skel); + err = test_ringbuf_lskel__load(skel); if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) goto cleanup; @@ -154,7 +154,7 @@ void test_ringbuf(void) if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) goto cleanup; - err = test_ringbuf__attach(skel); + err = test_ringbuf_lskel__attach(skel); if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) goto cleanup; @@ -292,8 +292,8 @@ void test_ringbuf(void) CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", 1L, skel->bss->discarded); - test_ringbuf__detach(skel); + test_ringbuf_lskel__detach(skel); cleanup: ring_buffer__free(ringbuf); - test_ringbuf__destroy(skel); + test_ringbuf_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c index 3f7a7141265e..cade7f12315f 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -10,27 +10,27 @@ void serial_test_trace_printk(void) { + struct trace_printk_lskel__bss *bss; int err = 0, iter = 0, found = 0; - struct trace_printk__bss *bss; - struct trace_printk *skel; + struct trace_printk_lskel *skel; char *buf = NULL; FILE *fp = NULL; size_t buflen; - skel = trace_printk__open(); + skel = trace_printk_lskel__open(); if (!ASSERT_OK_PTR(skel, "trace_printk__open")) return; ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]"); skel->rodata->fmt[0] = 't'; - err = trace_printk__load(skel); + err = trace_printk_lskel__load(skel); if (!ASSERT_OK(err, "trace_printk__load")) goto cleanup; bss = skel->bss; - err = trace_printk__attach(skel); + err = trace_printk_lskel__attach(skel); if (!ASSERT_OK(err, "trace_printk__attach")) goto cleanup; @@ -43,7 +43,7 @@ void serial_test_trace_printk(void) /* wait for tracepoint to trigger */ usleep(1); - trace_printk__detach(skel); + trace_printk_lskel__detach(skel); if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran")) goto cleanup; @@ -65,7 +65,7 @@ void serial_test_trace_printk(void) goto cleanup; cleanup: - trace_printk__destroy(skel); + trace_printk_lskel__destroy(skel); free(buf); if (fp) fclose(fp); diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c index 46101270cb1a..7a4e313e8558 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c @@ -10,20 +10,20 @@ void serial_test_trace_vprintk(void) { + struct trace_vprintk_lskel__bss *bss; int err = 0, iter = 0, found = 0; - struct 
trace_vprintk__bss *bss; - struct trace_vprintk *skel; + struct trace_vprintk_lskel *skel; char *buf = NULL; FILE *fp = NULL; size_t buflen; - skel = trace_vprintk__open_and_load(); + skel = trace_vprintk_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) goto cleanup; bss = skel->bss; - err = trace_vprintk__attach(skel); + err = trace_vprintk_lskel__attach(skel); if (!ASSERT_OK(err, "trace_vprintk__attach")) goto cleanup; @@ -36,7 +36,7 @@ void serial_test_trace_vprintk(void) /* wait for tracepoint to trigger */ usleep(1); - trace_vprintk__detach(skel); + trace_vprintk_lskel__detach(skel); if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran")) goto cleanup; @@ -61,7 +61,7 @@ void serial_test_trace_vprintk(void) goto cleanup; cleanup: - trace_vprintk__destroy(skel); + trace_vprintk_lskel__destroy(skel); free(buf); if (fp) fclose(fp); diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c index b4bae1340cf1..a47e7c0e1ffd 100644 --- a/tools/testing/selftests/bpf/prog_tests/verif_stats.c +++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c @@ -8,11 +8,11 @@ void test_verif_stats(void) { __u32 len = sizeof(struct bpf_prog_info); + struct trace_vprintk_lskel *skel; struct bpf_prog_info info = {}; - struct trace_vprintk *skel; int err; - skel = trace_vprintk__open_and_load(); + skel = trace_vprintk_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) goto cleanup; @@ -24,5 +24,5 @@ void test_verif_stats(void) goto cleanup; cleanup: - trace_vprintk__destroy(skel); + trace_vprintk_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 5f8379aadb29..8eadbd4caf7a 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -38,7 +38,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); if (rq) - out__existing_typed = rq->cpu; + out__existing_typed = 0; out__existing_typeless = (__u64)&bpf_prog_active; /* tests non-existent symbols. */ -- cgit v1.2.3 From c3fc706e94f5653def2783ffcd809a38676b7551 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Thu, 28 Oct 2021 12:05:00 +0530 Subject: selftests/bpf: Fix fd cleanup in sk_lookup test Similar to the fix in commit: e31eec77e4ab ("bpf: selftests: Fix fd cleanup in get_branch_snapshot") We use designated initializer to set fds to -1 without breaking on future changes to MAX_SERVER constant denoting the array size. The particular close(0) occurs on non-reuseport tests, so it can be seen with -n 115/{2,3} but not 115/4. This can cause problems with future tests if they depend on BTF fd never being acquired as fd 0, breaking internal libbpf assumptions. 
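
As a minimal sketch of the difference (not the test's exact cleanup code; MAX_SERVERS and ARRAY_SIZE come from the selftest sources), "{ -1 }" only initializes element 0 and zero-fills the rest, while the range designator fills every slot, so the cleanup loop cannot close fd 0 by accident and keeps working if MAX_SERVERS changes:

  	int buggy_fds[MAX_SERVERS] = { -1 };			/* old pattern: [0] = -1, the rest become 0 */
  	int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };	/* every slot starts at -1 */
  	int i;

  	for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
  		if (server_fds[i] < 0)		/* skip slots that never got a socket */
  			continue;
  		close(server_fds[i]);
  	}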
Fixes: 0ab5539f8584 ("selftests/bpf: Tests for BPF_SK_LOOKUP attach point") Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Reviewed-by: Jakub Sitnicki Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211028063501.2239335-8-memxor@gmail.com --- tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index aee41547e7f4..6db07401bc49 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -598,7 +598,7 @@ close: static void run_lookup_prog(const struct test *t) { - int server_fds[MAX_SERVERS] = { -1 }; + int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 }; int client_fd, reuse_conn_fd = -1; struct bpf_link *lookup_link; int i, err; @@ -1053,7 +1053,7 @@ static void run_sk_assign(struct test_sk_lookup *skel, struct bpf_program *lookup_prog, const char *remote_ip, const char *local_ip) { - int server_fds[MAX_SERVERS] = { -1 }; + int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 }; struct bpf_sk_lookup ctx; __u64 server_cookie; int i, err; -- cgit v1.2.3 From efadf2ad17a2d5dc90bda4e6e8b2f96af4c62dae Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Thu, 28 Oct 2021 12:05:01 +0530 Subject: selftests/bpf: Fix memory leak in test_ima The allocated ring buffer is never freed, do so in the cleanup path. Fixes: f446b570ac7e ("bpf/selftests: Update the IMA test to use BPF ring buffer") Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20211028063501.2239335-9-memxor@gmail.com --- tools/testing/selftests/bpf/prog_tests/test_ima.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index 0252f61d611a..97d8a6f84f4a 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -43,7 +43,7 @@ static int process_sample(void *ctx, void *data, size_t len) void test_test_ima(void) { char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; - struct ring_buffer *ringbuf; + struct ring_buffer *ringbuf = NULL; const char *measured_dir; char cmd[256]; @@ -85,5 +85,6 @@ close_clean: err = system(cmd); CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno); close_prog: + ring_buffer__free(ringbuf); ima__destroy(skel); } -- cgit v1.2.3 From f48ad69097fe79d1de13c4d8fef556d4c11c5e68 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Tue, 26 Oct 2021 16:34:09 +0200 Subject: selftests/bpf: Fix fclose/pclose mismatch in test_progs Make sure to use pclose() to properly close the pipe opened by popen(). 
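A minimal stand-alone sketch of the popen()/pclose() pairing that the fix restores (the command string is only an illustration, not what test_progs actually runs):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *fp = popen("readelf -n /bin/true", "r");

		if (!fp)
			return 1;
		if (fgets(line, sizeof(line), fp))
			printf("%s", line);
		/* A stream created by popen() must be closed with pclose(),
		 * which also waits for the child; fclose() would leave the
		 * child unreaped and discard its exit status.
		 */
		return pclose(fp) == -1;
	}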
Fixes: 81f77fd0deeb ("bpf: add selftest for stackmap with BPF_F_STACK_BUILD_ID") Signed-off-by: Andrea Righi Signed-off-by: Daniel Borkmann Reviewed-by: Shuah Khan Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20211026143409.42666-1-andrea.righi@canonical.com --- tools/testing/selftests/bpf/test_progs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 1f4a48566991..c65986bd9d07 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -382,7 +382,7 @@ int extract_build_id(char *build_id, size_t size) if (getline(&line, &len, fp) == -1) goto err; - fclose(fp); + pclose(fp); if (len > size) len = size; @@ -391,7 +391,7 @@ int extract_build_id(char *build_id, size_t size) free(line); return 0; err: - fclose(fp); + pclose(fp); return -1; } -- cgit v1.2.3 From 0133c20480b14820d43c37c0e9502da4bffcad3a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 29 Oct 2021 11:29:07 -0700 Subject: selftests/bpf: Fix strobemeta selftest regression After the most recent nightly Clang update, the strobemeta selftests started failing with the following error (relevant portion of assembly included): 1624: (85) call bpf_probe_read_user_str#114 1625: (bf) r1 = r0 1626: (18) r2 = 0xfffffffe 1628: (5f) r1 &= r2 1629: (55) if r1 != 0x0 goto pc+7 1630: (07) r9 += 104 1631: (6b) *(u16 *)(r9 +0) = r0 1632: (67) r0 <<= 32 1633: (77) r0 >>= 32 1634: (79) r1 = *(u64 *)(r10 -456) 1635: (0f) r1 += r0 1636: (7b) *(u64 *)(r10 -456) = r1 1637: (79) r1 = *(u64 *)(r10 -368) 1638: (c5) if r1 s< 0x1 goto pc+778 1639: (bf) r6 = r8 1640: (0f) r6 += r7 1641: (b4) w1 = 0 1642: (6b) *(u16 *)(r6 +108) = r1 1643: (79) r3 = *(u64 *)(r10 -352) 1644: (79) r9 = *(u64 *)(r10 -456) 1645: (bf) r1 = r9 1646: (b4) w2 = 1 1647: (85) call bpf_probe_read_user_str#114 R1 unbounded memory access, make sure to bounds check any such access In the above code r0 and r1 are implicitly related. Clang knows that, but the verifier isn't able to infer this relationship. Yonghong Song narrowed down this "regression" in code generation to a recent Clang optimization change ([0]), which for the BPF target generates a code pattern that the BPF verifier can't handle, so it loses track of register boundaries. This patch works around the issue by adding a BPF assembly-based helper that proves to the verifier that the upper bound of the register is a given constant, by controlling the exact shape of the generated BPF instruction sequence. This fixes the immediate issue for the strobemeta selftest. 
[0] https://github.com/llvm/llvm-project/commit/acabad9ff6bf13e00305d9d8621ee8eafc1f8b08 Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211029182907.166910-1-andrii@kernel.org --- tools/testing/selftests/bpf/progs/strobemeta.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 7de534f38c3f..3687ea755ab5 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -10,6 +10,14 @@ #include #include +#define bpf_clamp_umax(VAR, UMAX) \ + asm volatile ( \ + "if %0 <= %[max] goto +1\n" \ + "%0 = %[max]\n" \ + : "+r"(VAR) \ + : [max]"i"(UMAX) \ + ) + typedef uint32_t pid_t; struct task_struct {}; @@ -413,6 +421,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag); if (len <= STROBE_MAX_STR_LEN) { + bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->tag_len = len; payload += len; } @@ -430,6 +439,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.entries[i].key); if (len <= STROBE_MAX_STR_LEN) { + bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->key_lens[i] = len; payload += len; } @@ -437,6 +447,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.entries[i].val); if (len <= STROBE_MAX_STR_LEN) { + bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->val_lens[i] = len; payload += len; } -- cgit v1.2.3 From b556c3fd467628341cc7680e4271790cafd79dc4 Mon Sep 17 00:00:00 2001 From: Liu Jian Date: Fri, 29 Oct 2021 22:12:15 +0800 Subject: selftests, bpf: Fix test_txmsg_ingress_parser error After "skmsg: lose offset info in sk_psock_skb_ingress", the test case with ktls failed. This is because the ktls parser (tls_read_size) return value is 285, not 256. The case looks like this: tls_sk1 --> redir_sk --> tls_sk2. tls_sk1 sent out 512 bytes of data; after TLS-related processing, redir_sk received 570 bytes of data and redirected 512 bytes (skb_use_parser) to tls_sk2; but tls_sk2 needs 285 * 2 bytes of data, so a receive timeout occurred. Signed-off-by: Liu Jian Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211029141216.211899-2-liujian56@huawei.com --- tools/testing/selftests/bpf/test_sockmap.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index eefd445b96fc..06924917ad77 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1680,6 +1680,8 @@ static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt) { txmsg_pass = 1; skb_use_parser = 512; + if (ktls == 1) + skb_use_parser = 570; opt->iov_length = 256; opt->iov_count = 1; opt->rate = 2; -- cgit v1.2.3 From d69672147faa2a7671c0779fa5b9ad99e4fca4e3 Mon Sep 17 00:00:00 2001 From: Liu Jian Date: Fri, 29 Oct 2021 22:12:16 +0800 Subject: selftests, bpf: Add one test for sockmap with strparser Add a test to check that sockmap with strparser is working well. 
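For reference, the 285/570 byte figures in the previous patch add up if one assumes the selftest's usual ktls setup (TLS 1.2 with AES-GCM-128), where each record carries roughly 29 bytes of framing (5-byte record header + 8-byte explicit IV + 16-byte tag):

	2 records x 256 bytes of payload     = 512 bytes sent by tls_sk1
	2 records x (256 + 29) = 2 x 285     = 570 bytes seen by redir_sk
	tls_read_size() per record           = 285, not 256

so redirecting only 512 bytes can never satisfy the 2 x 285 bytes the receiving parser waits for, hence the timeout. That interplay is presumably also why the strparser test added below is skipped when ktls is enabled.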
Signed-off-by: Liu Jian Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20211029141216.211899-3-liujian56@huawei.com --- tools/testing/selftests/bpf/test_sockmap.c | 33 +++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 06924917ad77..1ba7e7346afb 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -139,6 +139,7 @@ struct sockmap_options { bool sendpage; bool data_test; bool drop_expected; + bool check_recved_len; int iov_count; int iov_length; int rate; @@ -556,8 +557,12 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, int err, i, flags = MSG_NOSIGNAL; bool drop = opt->drop_expected; bool data = opt->data_test; + int iov_alloc_length = iov_length; - err = msg_alloc_iov(&msg, iov_count, iov_length, data, tx); + if (!tx && opt->check_recved_len) + iov_alloc_length *= 2; + + err = msg_alloc_iov(&msg, iov_count, iov_alloc_length, data, tx); if (err) goto out_errno; if (peek_flag) { @@ -665,6 +670,13 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, s->bytes_recvd += recv; + if (opt->check_recved_len && s->bytes_recvd > total_bytes) { + errno = EMSGSIZE; + fprintf(stderr, "recv failed(), bytes_recvd:%zd, total_bytes:%f\n", + s->bytes_recvd, total_bytes); + goto out_errno; + } + if (data) { int chunk_sz = opt->sendpage ? iov_length * cnt : @@ -744,7 +756,8 @@ static int sendmsg_test(struct sockmap_options *opt) rxpid = fork(); if (rxpid == 0) { - iov_buf -= (txmsg_pop - txmsg_start_pop + 1); + if (txmsg_pop || txmsg_start_pop) + iov_buf -= (txmsg_pop - txmsg_start_pop + 1); if (opt->drop_expected || txmsg_ktls_skb_drop) _exit(0); @@ -1688,6 +1701,19 @@ static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt) test_exec(cgrp, opt); } +static void test_txmsg_ingress_parser2(int cgrp, struct sockmap_options *opt) +{ + if (ktls == 1) + return; + skb_use_parser = 10; + opt->iov_length = 20; + opt->iov_count = 1; + opt->rate = 1; + opt->check_recved_len = true; + test_exec(cgrp, opt); + opt->check_recved_len = false; +} + char *map_names[] = { "sock_map", "sock_map_txmsg", @@ -1786,7 +1812,8 @@ struct _test test[] = { {"txmsg test pull-data", test_txmsg_pull}, {"txmsg test pop-data", test_txmsg_pop}, {"txmsg test push/pop data", test_txmsg_push_pop}, - {"txmsg text ingress parser", test_txmsg_ingress_parser}, + {"txmsg test ingress parser", test_txmsg_ingress_parser}, + {"txmsg test ingress parser2", test_txmsg_ingress_parser2}, }; static int check_whitelist(struct _test *t, struct sockmap_options *opt) -- cgit v1.2.3 From 36e70b9b06bf14a0fac87315f0e73d6e17e80aad Mon Sep 17 00:00:00 2001 From: Björn Töpel Date: Thu, 28 Oct 2021 18:10:57 +0200 Subject: selftests, bpf: Fix broken riscv build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch is closely related to commit 6016df8fe874 ("selftests/bpf: Fix broken riscv build"). When clang includes the system include directories, but targeting BPF program, __BITS_PER_LONG defaults to 32, unless explicitly set. Work around this problem, by explicitly setting __BITS_PER_LONG to __riscv_xlen. 
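The mechanism, roughly (an illustration rather than the literal Makefile hunk that follows): on a riscv64 host, clang -dM -E - </dev/null reports "#define __riscv_xlen 64", and the selftests Makefile turns that into extra defines along the lines of

	-D__riscv_xlen=64 -D__BITS_PER_LONG=64

for the BPF program builds, so the uapi headers no longer fall back to a 32-bit __BITS_PER_LONG when compiling with -target bpf.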
Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20211028161057.520552-5-bjorn@kernel.org --- tools/testing/selftests/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f49cb5fc85af..54b0a41a3775 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -277,7 +277,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ define get_sys_includes $(shell $(1) -v -E - &1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ -$(shell $(1) -dM -E - Date: Mon, 25 Oct 2021 14:40:25 +0800 Subject: selftests/bpf: Add test cases for struct_ops prog Running a BPF_PROG_TYPE_STRUCT_OPS prog for dummy_st_ops::test_N() through bpf_prog_test_run(). Four test cases are added: (1) attach dummy_st_ops should fail (2) function return value of bpf_dummy_ops::test_1() is expected (3) pointer argument of bpf_dummy_ops::test_1() works as expected (4) multiple arguments passed to bpf_dummy_ops::test_2() are correct Signed-off-by: Hou Tao Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20211025064025.2567443-5-houtao1@huawei.com --- .../selftests/bpf/prog_tests/dummy_st_ops.c | 115 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/dummy_st_ops.c | 50 +++++++++ 2 files changed, 165 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c create mode 100644 tools/testing/selftests/bpf/progs/dummy_st_ops.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c new file mode 100644 index 000000000000..cbaa44ffb8c6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. 
Huawei Technologies Co., Ltd */ +#include +#include "dummy_st_ops.skel.h" + +/* Need to keep consistent with definition in include/linux/bpf.h */ +struct bpf_dummy_ops_state { + int val; +}; + +static void test_dummy_st_ops_attach(void) +{ + struct dummy_st_ops *skel; + struct bpf_link *link; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.dummy_1); + ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ret_value(void) +{ + __u64 args[1] = {0}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ptr_arg(void) +{ + int exp_retval = 0xbeef; + struct bpf_dummy_ops_state in_state = { + .val = exp_retval, + }; + __u64 args[1] = {(unsigned long)&in_state}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); + ASSERT_EQ(attr.retval, exp_retval, "test_ret"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_multiple_args(void) +{ + __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + size_t i; + char name[8]; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_2); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + for (i = 0; i < ARRAY_SIZE(args); i++) { + snprintf(name, sizeof(name), "arg %zu", i); + ASSERT_EQ(skel->bss->test_2_args[i], args[i], name); + } + + dummy_st_ops__destroy(skel); +} + +void test_dummy_st_ops(void) +{ + if (test__start_subtest("dummy_st_ops_attach")) + test_dummy_st_ops_attach(); + if (test__start_subtest("dummy_init_ret_value")) + test_dummy_init_ret_value(); + if (test__start_subtest("dummy_init_ptr_arg")) + test_dummy_init_ptr_arg(); + if (test__start_subtest("dummy_multiple_args")) + test_dummy_multiple_args(); +} diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops.c b/tools/testing/selftests/bpf/progs/dummy_st_ops.c new file mode 100644 index 000000000000..ead87edb75e2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. 
Huawei Technologies Co., Ltd */ +#include +#include +#include + +struct bpf_dummy_ops_state { + int val; +} __attribute__((preserve_access_index)); + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *state); + int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2, + char a3, unsigned long a4); +}; + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1, struct bpf_dummy_ops_state *state) +{ + int ret; + + if (!state) + return 0xf2f3f4f5; + + ret = state->val; + state->val = 0x5a; + return ret; +} + +__u64 test_2_args[5]; + +SEC("struct_ops/test_2") +int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2, + char a3, unsigned long a4) +{ + test_2_args[0] = (unsigned long)state; + test_2_args[1] = a1; + test_2_args[2] = a2; + test_2_args[3] = a3; + test_2_args[4] = a4; + return 0; +} + +SEC(".struct_ops") +struct bpf_dummy_ops dummy_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; -- cgit v1.2.3 From 7a67087250f0003acc1b9e20eda01635e1e51f9a Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 29 Oct 2021 15:49:09 -0700 Subject: selftests/bpf: Add bloom map success test for userspace calls This patch has two changes: 1) Adds a new function "test_success_cases" to test successfully creating + adding + looking up a value in a bloom filter map from the userspace side. 2) Use bpf_create_map instead of bpf_create_map_xattr in the "test_fail_cases" and test_inner_map to make the code look cleaner. Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211029224909.1721024-4-joannekoong@fb.com --- .../selftests/bpf/prog_tests/bloom_filter_map.c | 59 ++++++++++++---------- 1 file changed, 33 insertions(+), 26 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index 9aa3fbed918b..be73e3de6668 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -7,44 +7,31 @@ static void test_fail_cases(void) { - struct bpf_create_map_attr xattr = { - .name = "bloom_filter_map", - .map_type = BPF_MAP_TYPE_BLOOM_FILTER, - .max_entries = 100, - .value_size = 11, - }; __u32 value; int fd, err; /* Invalid key size */ - xattr.key_size = 4; - fd = bpf_create_map_xattr(&xattr); + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 4, sizeof(value), 100, 0); if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid key size")) close(fd); - xattr.key_size = 0; /* Invalid value size */ - xattr.value_size = 0; - fd = bpf_create_map_xattr(&xattr); + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, 0, 100, 0); if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid value size 0")) close(fd); - xattr.value_size = 11; /* Invalid max entries size */ - xattr.max_entries = 0; - fd = bpf_create_map_xattr(&xattr); + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 0, 0); if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid max entries size")) close(fd); - xattr.max_entries = 100; /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ - xattr.map_flags = BPF_F_NO_PREALLOC; - fd = bpf_create_map_xattr(&xattr); + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, + BPF_F_NO_PREALLOC); if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid flags")) close(fd); - xattr.map_flags = 0; - fd = 
bpf_create_map_xattr(&xattr); + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, 0); if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter")) return; @@ -67,6 +54,30 @@ static void test_fail_cases(void) close(fd); } +static void test_success_cases(void) +{ + char value[11]; + int fd, err; + + /* Create a map */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, + BPF_F_ZERO_SEED | BPF_F_NUMA_NODE); + if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter success case")) + return; + + /* Add a value to the bloom filter */ + err = bpf_map_update_elem(fd, NULL, &value, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem bloom filter success case")) + goto done; + + /* Lookup a value in the bloom filter */ + err = bpf_map_lookup_elem(fd, NULL, &value); + ASSERT_OK(err, "bpf_map_update_elem bloom filter success case"); + +done: + close(fd); +} + static void check_bloom(struct bloom_filter_map *skel) { struct bpf_link *link; @@ -86,16 +97,11 @@ static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals __u32 nr_rand_vals) { int outer_map_fd, inner_map_fd, err, i, key = 0; - struct bpf_create_map_attr xattr = { - .name = "bloom_filter_inner_map", - .map_type = BPF_MAP_TYPE_BLOOM_FILTER, - .value_size = sizeof(__u32), - .max_entries = nr_rand_vals, - }; struct bpf_link *link; /* Create a bloom filter map that will be used as the inner map */ - inner_map_fd = bpf_create_map_xattr(&xattr); + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(*rand_vals), + nr_rand_vals, 0); if (!ASSERT_GE(inner_map_fd, 0, "bpf_create_map bloom filter inner map")) return; @@ -190,6 +196,7 @@ void test_bloom_filter_map(void) int err; test_fail_cases(); + test_success_cases(); err = setup_progs(&skel, &rand_vals, &nr_rand_vals); if (err) -- cgit v1.2.3 From a20eac0af02810669e187cb623bc904908c423af Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 1 Nov 2021 16:01:18 -0700 Subject: selftests/bpf: Fix also no-alu32 strobemeta selftest The previous fix added bpf_clamp_umax() helper use to re-validate boundaries. While that works correctly, it introduces more branches, which blows up past 1 million instructions in the no-alu32 variant of the strobemeta selftests. Switching the len variable from u32 to u64 also fixes the issue and reduces the number of validated instructions, so use that instead. With this fix applied and bpf_clamp_umax() removed, both alu32 and no-alu32 selftests pass. 
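Roughly speaking, bpf_probe_read_user_str() returns a 64-bit long, so once len is also 64-bit the single len <= STROBE_MAX_STR_LEN check gives the verifier a tight upper bound on the very register that is later added to payload, with no 32-bit truncation in between; the extra conditional branches that bpf_clamp_umax() injected at each call site become unnecessary, which is what brings the no-alu32 variant back under the 1 million verified-instruction limit.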
Fixes: 0133c20480b1 ("selftests/bpf: Fix strobemeta selftest regression") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211101230118.1273019-1-andrii@kernel.org --- tools/testing/selftests/bpf/progs/strobemeta.h | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 3687ea755ab5..60c93aee2f4a 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -10,14 +10,6 @@ #include #include -#define bpf_clamp_umax(VAR, UMAX) \ - asm volatile ( \ - "if %0 <= %[max] goto +1\n" \ - "%0 = %[max]\n" \ - : "+r"(VAR) \ - : [max]"i"(UMAX) \ - ) - typedef uint32_t pid_t; struct task_struct {}; @@ -366,7 +358,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, void *payload) { void *location; - uint32_t len; + uint64_t len; data->str_lens[idx] = 0; location = calc_location(&cfg->str_locs[idx], tls_base); @@ -398,7 +390,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, struct strobe_map_descr* descr = &data->map_descrs[idx]; struct strobe_map_raw map; void *location; - uint32_t len; + uint64_t len; int i; descr->tag_len = 0; /* presume no tag is set */ @@ -421,7 +413,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag); if (len <= STROBE_MAX_STR_LEN) { - bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->tag_len = len; payload += len; } @@ -439,7 +430,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.entries[i].key); if (len <= STROBE_MAX_STR_LEN) { - bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->key_lens[i] = len; payload += len; } @@ -447,7 +437,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.entries[i].val); if (len <= STROBE_MAX_STR_LEN) { - bpf_clamp_umax(len, STROBE_MAX_STR_LEN); descr->val_lens[i] = len; payload += len; } -- cgit v1.2.3 From b9979db8340154526d9ab38a1883d6f6ba9b6d47 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 1 Nov 2021 15:21:51 -0700 Subject: bpf: Fix propagation of bounds from 64-bit min/max into 32-bit and var_off. Before this fix: 166: (b5) if r2 <= 0x1 goto pc+22 from 166 to 189: R2=invP(id=1,umax_value=1,var_off=(0x0; 0xffffffff)) After this fix: 166: (b5) if r2 <= 0x1 goto pc+22 from 166 to 189: R2=invP(id=1,umax_value=1,var_off=(0x0; 0x1)) While processing BPF_JLE the reg_set_min_max() would set true_reg->umax_value = 1 and call __reg_combine_64_into_32(true_reg). Without the fix it would not pass the condition: if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) since umin_value == 0 at this point. Before commit 10bf4e83167c the umin was incorrectly ingored. The commit 10bf4e83167c fixed the correctness issue, but pessimized propagation of 64-bit min max into 32-bit min max and corresponding var_off. 
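Spelled out against the one-line verifier change below, using the BPF_JLE example above (a = umin_value = 0 and a = umax_value = 1):

	/* old: */ return a > U32_MIN && a < U32_MAX;   /* 0 > 0 is false, so 32-bit bounds are not derived */
	/* new: */ return a >= U32_MIN && a <= U32_MAX; /* both 0 and 1 pass, so the bounds are derived */

Once both 64-bit bounds are accepted, __reg_combine_64_into_32() can copy umin/umax into the 32-bit bounds and var_off tightens from (0x0; 0xffffffff) to (0x0; 0x1).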
Fixes: 10bf4e83167c ("bpf: Fix propagation of 32 bit unsigned bounds from 64 bit bounds") Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211101222153.78759-1-alexei.starovoitov@gmail.com --- kernel/bpf/verifier.c | 2 +- tools/testing/selftests/bpf/verifier/array_access.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3c8aa7df1773..29671ed49ee8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1425,7 +1425,7 @@ static bool __reg64_bound_s32(s64 a) static bool __reg64_bound_u32(u64 a) { - return a > U32_MIN && a < U32_MAX; + return a >= U32_MIN && a <= U32_MAX; } static void __reg_combine_64_into_32(struct bpf_reg_state *reg) diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c index 1b1c798e9248..1b138cd2b187 100644 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ b/tools/testing/selftests/bpf/verifier/array_access.c @@ -186,7 +186,7 @@ }, .fixup_map_hash_48b = { 3 }, .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", + .errstr = "invalid access to map value, value_size=48 off=44 size=8", .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -- cgit v1.2.3 From 0869e5078afbd1b22c20832b391b2d85291bc0a8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 1 Nov 2021 15:21:53 -0700 Subject: selftests/bpf: Add a testcase for 64-bit bounds propagation issue. ./test_progs-no_alu32 -vv -t twfw Before the 64-bit_into_32-bit fix: 19: (25) if r1 > 0x3f goto pc+6 R1_w=inv(id=0,umax_value=63,var_off=(0x0; 0xff),s32_max_value=255,u32_max_value=255) and eventually: invalid access to map value, value_size=8 off=7 size=8 R6 max value is outside of the allowed memory range libbpf: failed to load object 'no_alu32/twfw.o' After the fix: 19: (25) if r1 > 0x3f goto pc+6 R1_w=inv(id=0,umax_value=63,var_off=(0x0; 0x3f)) verif_twfw:OK Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211101222153.78759-3-alexei.starovoitov@gmail.com --- .../selftests/bpf/prog_tests/bpf_verif_scale.c | 5 ++ tools/testing/selftests/bpf/progs/twfw.c | 58 ++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/twfw.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 867349e4ed9e..27f5d8ea7964 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -202,3 +202,8 @@ void test_verif_scale_seg6_loop() { scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); } + +void test_verif_twfw() +{ + scale_test("twfw.o", BPF_PROG_TYPE_CGROUP_SKB, false); +} diff --git a/tools/testing/selftests/bpf/progs/twfw.c b/tools/testing/selftests/bpf/progs/twfw.c new file mode 100644 index 000000000000..de1b18a62b46 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/twfw.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include + +#define TWFW_MAX_TIERS (64) +/* + * load is successful + * #define TWFW_MAX_TIERS (64u)$ + */ + +struct twfw_tier_value { + unsigned long mask[1]; +}; + +struct rule { + uint8_t seqnum; +}; + 
+struct rules_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, struct rule); + __uint(max_entries, 1); +}; + +struct tiers_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, struct twfw_tier_value); + __uint(max_entries, 1); +}; + +struct rules_map rules SEC(".maps"); +struct tiers_map tiers SEC(".maps"); + +SEC("cgroup_skb/ingress") +int twfw_verifier(struct __sk_buff* skb) +{ + const uint32_t key = 0; + const struct twfw_tier_value* tier = bpf_map_lookup_elem(&tiers, &key); + if (!tier) + return 1; + + struct rule* rule = bpf_map_lookup_elem(&rules, &key); + if (!rule) + return 1; + + if (rule && rule->seqnum < TWFW_MAX_TIERS) { + /* rule->seqnum / 64 should always be 0 */ + unsigned long mask = tier->mask[rule->seqnum / 64]; + if (mask) + return 0; + } + return 1; +} -- cgit v1.2.3