Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--  tools/lib/bpf/libbpf.c | 167
1 file changed, 111 insertions(+), 56 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6b85060f07b3..8f5a81b672e1 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -60,6 +60,8 @@
 #define BPF_FS_MAGIC		0xcafe4a11
 #endif
 
+#define MAX_EVENT_NAME_LEN	64
+
 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
 
 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
@@ -284,7 +286,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
 	old_errno = errno;
 
 	va_start(args, format);
-	__libbpf_pr(level, format, args);
+	print_fn(level, format, args);
 	va_end(args);
 
 	errno = old_errno;
@@ -595,7 +597,7 @@ struct extern_desc {
 	int sym_idx;
 	int btf_id;
 	int sec_btf_id;
-	const char *name;
+	char *name;
 	char *essent_name;
 	bool is_set;
 	bool is_weak;
@@ -733,7 +735,7 @@ struct bpf_object {
 
 	struct usdt_manager *usdt_man;
 
-	struct bpf_map *arena_map;
+	int arena_map_idx;
 	void *arena_data;
 	size_t arena_data_sz;
 
@@ -896,7 +898,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
-		if (sec_off + prog_sz > sec_sz) {
+		if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
 			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
 				sec_name, sec_off);
 			return -LIBBPF_ERRNO__FORMAT;
@@ -1515,6 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
 	obj->kconfig_map_idx = -1;
+	obj->arena_map_idx = -1;
 
 	obj->kern_version = get_kernel_version();
 	obj->state  = OBJ_OPEN;
@@ -1725,15 +1728,6 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
 	return ERR_PTR(-ENOENT);
 }
 
-/* Some versions of Android don't provide memfd_create() in their libc
- * implementation, so avoid complications and just go straight to Linux
- * syscall.
- */
-static int sys_memfd_create(const char *name, unsigned flags)
-{
-	return syscall(__NR_memfd_create, name, flags);
-}
-
 #ifndef MFD_CLOEXEC
 #define MFD_CLOEXEC 0x0001U
 #endif
@@ -2971,7 +2965,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
 	const long page_sz = sysconf(_SC_PAGE_SIZE);
 	size_t mmap_sz;
 
-	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+	mmap_sz = bpf_map_mmap_sz(map);
 	if (roundup(data_sz, page_sz) > mmap_sz) {
 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
 			sec_name, mmap_sz, data_sz);
@@ -3045,12 +3039,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 		if (map->def.type != BPF_MAP_TYPE_ARENA)
 			continue;
 
-		if (obj->arena_map) {
+		if (obj->arena_map_idx >= 0) {
 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
-				map->name, obj->arena_map->name);
+				map->name, obj->maps[obj->arena_map_idx].name);
 			return -EINVAL;
 		}
-		obj->arena_map = map;
+		obj->arena_map_idx = i;
 
 		if (obj->efile.arena_data) {
 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3060,7 +3054,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 				return err;
 		}
 	}
-	if (obj->efile.arena_data && !obj->arena_map) {
+	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
 			ARENA_SEC);
 		return -ENOENT;
@@ -4266,7 +4260,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
 			return ext->btf_id;
 		}
 		t = btf__type_by_id(obj->btf, ext->btf_id);
-		ext->name = btf__name_by_offset(obj->btf, t->name_off);
+		ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
+		if (!ext->name)
+			return -ENOMEM;
 		ext->sym_idx = i;
 		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
 
@@ -4586,10 +4582,20 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 
 	/* arena data relocation */
 	if (shdr_idx == obj->efile.arena_data_shndx) {
+		if (obj->arena_map_idx < 0) {
+			pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
+				prog->name, insn_idx);
+			return -LIBBPF_ERRNO__RELOC;
+		}
 		reloc_desc->type = RELO_DATA;
 		reloc_desc->insn_idx = insn_idx;
-		reloc_desc->map_idx = obj->arena_map - obj->maps;
+		reloc_desc->map_idx = obj->arena_map_idx;
 		reloc_desc->sym_off = sym->st_value;
+
+		map = &obj->maps[obj->arena_map_idx];
+		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+			 map->sec_offset, insn_idx);
 		return 0;
 	}
 
@@ -9145,8 +9151,10 @@ void bpf_object__close(struct bpf_object *obj)
 	zfree(&obj->btf_custom_path);
 	zfree(&obj->kconfig);
 
-	for (i = 0; i < obj->nr_extern; i++)
+	for (i = 0; i < obj->nr_extern; i++) {
+		zfree(&obj->externs[i].name);
 		zfree(&obj->externs[i].essent_name);
+	}
 
 	zfree(&obj->externs);
 	obj->nr_extern = 0;
@@ -9213,7 +9221,7 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
 		return libbpf_err(-EFAULT);
 	if (!OPTS_VALID(opts, gen_loader_opts))
 		return libbpf_err(-EINVAL);
-	gen = calloc(sizeof(*gen), 1);
+	gen = calloc(1, sizeof(*gen));
 	if (!gen)
 		return libbpf_err(-ENOMEM);
 	gen->opts = opts;
@@ -9455,6 +9463,30 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
 	return 0;
 }
 
+struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog)
+{
+	if (prog->func_info_rec_size != sizeof(struct bpf_func_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->func_info;
+}
+
+__u32 bpf_program__func_info_cnt(const struct bpf_program *prog)
+{
+	return prog->func_info_cnt;
+}
+
+struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog)
+{
+	if (prog->line_info_rec_size != sizeof(struct bpf_line_info))
+		return libbpf_err_ptr(-EOPNOTSUPP);
+	return prog->line_info;
+}
+
+__u32 bpf_program__line_info_cnt(const struct bpf_program *prog)
+{
+	return prog->line_info_cnt;
+}
+
 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
 	.sec = (char *)sec_pfx,						    \
 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
@@ -10064,7 +10096,7 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
 			      enum bpf_attach_type attach_type,
 			      int *btf_obj_fd, int *btf_type_id)
 {
-	int ret, i, mod_len;
+	int ret, i, mod_len = 0;
 	const char *fn_name, *mod_name = NULL;
 
 	fn_name = strchr(attach_name, ':');
@@ -10933,11 +10965,14 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
 		}
 		link->link.fd = pfd;
 	}
-	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
-		err = -errno;
-		pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
-			prog->name, pfd, errstr(err));
-		goto err_out;
+
+	if (!OPTS_GET(opts, dont_enable, false)) {
+		if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
+			err = -errno;
+			pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
+				prog->name, pfd, errstr(err));
+			goto err_out;
+		}
 	}
 
 	return &link->link;
@@ -11121,16 +11156,16 @@ static const char *tracefs_available_filter_functions_addrs(void)
 			     : TRACEFS"/available_filter_functions_addrs";
 }
 
-static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
-					 const char *kfunc_name, size_t offset)
+static void gen_probe_legacy_event_name(char *buf, size_t buf_sz,
+					const char *name, size_t offset)
 {
 	static int index = 0;
 	int i;
 
-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
-		 __sync_fetch_and_add(&index, 1));
+	snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(),
+		 __sync_fetch_and_add(&index, 1), name, offset);
 
-	/* sanitize binary_path in the probe name */
+	/* sanitize name in the probe name */
 	for (i = 0; buf[i]; i++) {
 		if (!isalnum(buf[i]))
 			buf[i] = '_';
@@ -11255,9 +11290,9 @@ int probe_kern_syscall_wrapper(int token_fd)
 
 		return pfd >= 0 ? 1 : 0;
 	} else { /* legacy mode */
-		char probe_name[128];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
 		if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
 			return 0;
 
@@ -11313,10 +11348,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 					    func_name, offset,
 					    -1 /* pid */, 0 /* ref_ctr_off */);
 	} else {
-		char probe_name[256];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
-		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
-					     func_name, offset);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+					    func_name, offset);
 
 		legacy_probe = strdup(probe_name);
 		if (!legacy_probe)
@@ -11860,20 +11895,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
 	return ret;
 }
 
-static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
-					 const char *binary_path, uint64_t offset)
-{
-	int i;
-
-	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
-
-	/* sanitize binary_path in the probe name */
-	for (i = 0; buf[i]; i++) {
-		if (!isalnum(buf[i]))
-			buf[i] = '_';
-	}
-}
-
 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
 					  const char *binary_path, size_t offset)
 {
@@ -12297,13 +12318,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
 					    func_offset, pid, ref_ctr_off);
 	} else {
-		char probe_name[PATH_MAX + 64];
+		char probe_name[MAX_EVENT_NAME_LEN];
 
 		if (ref_ctr_off)
 			return libbpf_err_ptr(-EINVAL);
 
-		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
-					     binary_path, func_offset);
+		gen_probe_legacy_event_name(probe_name, sizeof(probe_name),
+					    strrchr(binary_path, '/') ? : binary_path,
+					    func_offset);
 
 		legacy_probe = strdup(probe_name);
 		if (!legacy_probe)
@@ -12834,6 +12856,34 @@ struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifi
 }
 
 struct bpf_link *
+bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
+				const struct bpf_cgroup_opts *opts)
+{
+	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
+	__u32 relative_id;
+	int relative_fd;
+
+	if (!OPTS_VALID(opts, bpf_cgroup_opts))
+		return libbpf_err_ptr(-EINVAL);
+
+	relative_id = OPTS_GET(opts, relative_id, 0);
+	relative_fd = OPTS_GET(opts, relative_fd, 0);
+
+	if (relative_fd && relative_id) {
+		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
+			prog->name);
+		return libbpf_err_ptr(-EINVAL);
+	}
+
+	link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0);
+	link_create_opts.cgroup.relative_fd = relative_fd;
+	link_create_opts.cgroup.relative_id = relative_id;
+	link_create_opts.flags = OPTS_GET(opts, flags, 0);
+
+	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts);
+}
+
+struct bpf_link *
 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
 			const struct bpf_tcx_opts *opts)
 {
@@ -13371,7 +13421,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
 	attr.type = PERF_TYPE_SOFTWARE;
 	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.sample_period = sample_period;
 	attr.wakeup_events = sample_period;
 
 	p.attr = &attr;
@@ -14099,6 +14148,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 		}
 
 		link = map_skel->link;
+		if (!link) {
+			pr_warn("map '%s': BPF map skeleton link is uninitialized\n",
+				bpf_map__name(map));
+			continue;
+		}
+
 		if (*link)
 			continue;
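Usage note (not part of the diff): the bpf_program__attach_cgroup_opts() API added above takes a bpf_cgroup_opts struct carrying relative_fd/relative_id ordering hints, an expected_revision and attach flags, and returns a bpf_link. A minimal caller sketch, assuming libbpf 1.x error conventions (NULL return with errno set); the cgroup path and the attach_to_cgroup() helper name are illustrative, not from the patch:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <bpf/libbpf.h>

/* Attach prog to the given cgroup with default opts (no ordering hints,
 * expected_revision 0). The kernel link keeps its own cgroup reference,
 * so the fd can be closed once the link is created. */
static struct bpf_link *attach_to_cgroup(struct bpf_program *prog, const char *cg_path)
{
	LIBBPF_OPTS(bpf_cgroup_opts, opts);
	struct bpf_link *link;
	int cg_fd;

	cg_fd = open(cg_path, O_RDONLY);	/* e.g. "/sys/fs/cgroup/example.slice" (hypothetical path) */
	if (cg_fd < 0)
		return NULL;

	link = bpf_program__attach_cgroup_opts(prog, cg_fd, &opts);
	close(cg_fd);
	return link;	/* NULL with errno set on failure */
}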
