Diffstat (limited to 'kernel/bpf/btf.c')
-rw-r--r--	kernel/bpf/btf.c	99
1 file changed, 44 insertions(+), 55 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 64739308902f..0de8fc8a0e0b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3478,60 +3478,45 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 	return BTF_FIELD_FOUND;
 }
 
-#define field_mask_test_name(field_type, field_type_str) \
-	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
-		type = field_type;				\
-		goto end;					\
-	}
-
 static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
-			      u32 field_mask, u32 *seen_mask,
-			      int *align, int *sz)
-{
-	int type = 0;
+			      u32 field_mask, u32 *seen_mask, int *align, int *sz)
+{
+	const struct {
+		enum btf_field_type type;
+		const char *const name;
+		const bool is_unique;
+	} field_types[] = {
+		{ BPF_SPIN_LOCK, "bpf_spin_lock", true },
+		{ BPF_RES_SPIN_LOCK, "bpf_res_spin_lock", true },
+		{ BPF_TIMER, "bpf_timer", true },
+		{ BPF_WORKQUEUE, "bpf_wq", true },
+		{ BPF_TASK_WORK, "bpf_task_work", true },
+		{ BPF_LIST_HEAD, "bpf_list_head", false },
+		{ BPF_LIST_NODE, "bpf_list_node", false },
+		{ BPF_RB_ROOT, "bpf_rb_root", false },
+		{ BPF_RB_NODE, "bpf_rb_node", false },
+		{ BPF_REFCOUNT, "bpf_refcount", false },
+	};
+	int type = 0, i;
 	const char *name = __btf_name_by_offset(btf, var_type->name_off);
-
-	if (field_mask & BPF_SPIN_LOCK) {
-		if (!strcmp(name, "bpf_spin_lock")) {
-			if (*seen_mask & BPF_SPIN_LOCK)
-				return -E2BIG;
-			*seen_mask |= BPF_SPIN_LOCK;
-			type = BPF_SPIN_LOCK;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_RES_SPIN_LOCK) {
-		if (!strcmp(name, "bpf_res_spin_lock")) {
-			if (*seen_mask & BPF_RES_SPIN_LOCK)
-				return -E2BIG;
-			*seen_mask |= BPF_RES_SPIN_LOCK;
-			type = BPF_RES_SPIN_LOCK;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_TIMER) {
-		if (!strcmp(name, "bpf_timer")) {
-			if (*seen_mask & BPF_TIMER)
-				return -E2BIG;
-			*seen_mask |= BPF_TIMER;
-			type = BPF_TIMER;
-			goto end;
-		}
-	}
-	if (field_mask & BPF_WORKQUEUE) {
-		if (!strcmp(name, "bpf_wq")) {
-			if (*seen_mask & BPF_WORKQUEUE)
+	const char *field_type_name;
+	enum btf_field_type field_type;
+	bool is_unique;
+
+	for (i = 0; i < ARRAY_SIZE(field_types); ++i) {
+		field_type = field_types[i].type;
+		field_type_name = field_types[i].name;
+		is_unique = field_types[i].is_unique;
+		if (!(field_mask & field_type) || strcmp(name, field_type_name))
+			continue;
+		if (is_unique) {
+			if (*seen_mask & field_type)
 				return -E2BIG;
-			*seen_mask |= BPF_WORKQUEUE;
-			type = BPF_WORKQUEUE;
-			goto end;
+			*seen_mask |= field_type;
 		}
+		type = field_type;
+		goto end;
 	}
-	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
-	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
-	field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
-	field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
-	field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
 
 	/* Only return BPF_KPTR when all other types with matchable names fail */
 	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
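The table-driven lookup above collapses the per-type if-chains and the field_mask_test_name macro into one loop: each entry pairs a btf_field_type flag with its struct name, and is_unique marks fields that may appear at most once per map value, tracked through *seen_mask with -E2BIG returned on a duplicate. A minimal stand-alone sketch of the same technique, using hypothetical flag values and field names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define E2BIG 7	/* stand-in for the kernel's errno value */

/* Hypothetical stand-ins for the kernel's enum btf_field_type flags. */
enum field_bit { F_LOCK = 1 << 0, F_TIMER = 1 << 1, F_NODE = 1 << 2 };

static const struct {
	enum field_bit type;
	const char *const name;
	const bool is_unique;
} field_types[] = {
	{ F_LOCK,  "lock",  true },	/* at most one per map value */
	{ F_TIMER, "timer", true },
	{ F_NODE,  "node",  false },	/* may appear repeatedly */
};

static int get_field_type(const char *name, unsigned int mask,
			  unsigned int *seen_mask)
{
	for (size_t i = 0; i < ARRAY_SIZE(field_types); ++i) {
		if (!(mask & field_types[i].type) ||
		    strcmp(name, field_types[i].name))
			continue;
		if (field_types[i].is_unique) {
			if (*seen_mask & field_types[i].type)
				return -E2BIG;	/* duplicate unique field */
			*seen_mask |= field_types[i].type;
		}
		return field_types[i].type;
	}
	return 0;	/* no special field matched */
}

int main(void)
{
	unsigned int seen = 0, mask = F_LOCK | F_NODE;

	printf("%d\n", get_field_type("lock", mask, &seen));	/* 1 */
	printf("%d\n", get_field_type("lock", mask, &seen));	/* -7 */
	printf("%d\n", get_field_type("node", mask, &seen));	/* 4 */
	printf("%d\n", get_field_type("node", mask, &seen));	/* 4 */
	return 0;
}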
@@ -3545,8 +3530,6 @@ end:
 	return type;
 }
 
-#undef field_mask_test_name
-
 /* Repeat a number of fields for a specified number of times.
  *
  * Copy the fields starting from the first field and repeat them for
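The comment above (truncated by the diff context) introduces the field-repetition helper used for arrays: the field records discovered in the first array element are copied for each remaining element, with every copy's offset shifted by the element size. A rough sketch of that stride idea; the names are illustrative, not the kernel's actual btf_repeat_fields() signature:

/* Rough sketch only: replicate the nfields records found in the first
 * array element across nelems elements, shifting each copy's offset by
 * elem_size. */
struct field_rec {
	unsigned int offset;
	int type;
};

static void repeat_fields(struct field_rec *recs, int nfields,
			  int nelems, unsigned int elem_size)
{
	for (int i = 1; i < nelems; i++)
		for (int j = 0; j < nfields; j++) {
			recs[i * nfields + j] = recs[j];
			recs[i * nfields + j].offset += i * elem_size;
		}
}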
@@ -3693,6 +3676,7 @@ static int btf_find_field_one(const struct btf *btf,
 	case BPF_LIST_NODE:
 	case BPF_RB_NODE:
 	case BPF_REFCOUNT:
+	case BPF_TASK_WORK:
 		ret = btf_find_struct(btf, var_type, off, sz, field_type,
 				      info_cnt ? &info[0] : &tmp);
 		if (ret < 0)
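Adding BPF_TASK_WORK to this case list makes btf_find_struct() locate a struct bpf_task_work member inside a map value the same way it already finds bpf_timer or bpf_list_node. On the BPF program side such a field would be declared roughly like this, a sketch assuming struct bpf_task_work is visible through vmlinux.h; the map itself uses standard libbpf declaration macros:

/* Sketch of a BPF-side map value carrying a task-work field. */
struct elem {
	struct bpf_task_work tw;	/* matched by name during BTF parsing */
	__u64 payload;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, struct elem);
} tw_map SEC(".maps");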
@@ -3985,6 +3969,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 	rec->timer_off = -EINVAL;
 	rec->wq_off = -EINVAL;
 	rec->refcount_off = -EINVAL;
+	rec->task_work_off = -EINVAL;
 	for (i = 0; i < cnt; i++) {
 		field_type_size = btf_field_type_size(info_arr[i].type);
 		if (info_arr[i].off + field_type_size > value_size) {
@@ -4024,6 +4009,10 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 			/* Cache offset for faster lookup at runtime */
 			rec->wq_off = rec->fields[i].offset;
 			break;
+		case BPF_TASK_WORK:
+			WARN_ON_ONCE(rec->task_work_off >= 0);
+			rec->task_work_off = rec->fields[i].offset;
+			break;
 		case BPF_REFCOUNT:
 			WARN_ON_ONCE(rec->refcount_off >= 0);
 			/* Cache offset for faster lookup at runtime */
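Like wq_off and refcount_off in the neighboring cases, task_work_off caches the field's offset in the btf_record so runtime code can reach the bpf_task_work member without scanning rec->fields; the -EINVAL set during initialization means the field is absent. A schematic consumer of that pattern, with a hypothetical helper name:

/* Hypothetical helper illustrating the cached-offset pattern:
 * task_work_off < 0 (-EINVAL) means the value type has no
 * bpf_task_work field; otherwise it indexes into the map value. */
static void *value_task_work(const struct btf_record *rec, void *value)
{
	if (!rec || rec->task_work_off < 0)
		return NULL;
	return value + rec->task_work_off;
}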
@@ -6762,7 +6751,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 	/* skip modifiers */
 	while (btf_type_is_modifier(t))
 		t = btf_type_by_id(btf, t->type);
-	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
+	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || btf_type_is_struct(t))
 		/* accessing a scalar */
 		return true;
 	if (!btf_type_is_ptr(t)) {
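This hunk and the ones below swap __btf_type_is_struct() for btf_type_is_struct(), which widens the check: the btf.c-local double-underscore helper matches BTF_KIND_STRUCT only, while the shared btf.h helper also accepts BTF_KIND_UNION. Paraphrasing the two definitions as I understand them from the tree:

/* The two helpers, paraphrased: the btf.c-local one is struct-only,
 * the btf.h one treats unions as structs as well. */
static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static inline bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}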
@@ -7334,7 +7323,7 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
 	if (btf_type_is_ptr(t))
 		/* kernel size of pointer. Not BPF's size of pointer*/
 		return sizeof(void *);
-	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
+	if (btf_type_is_int(t) || btf_is_any_enum(t) || btf_type_is_struct(t))
 		return t->size;
 	return -EINVAL;
 }
@@ -7343,7 +7332,7 @@ static u8 __get_type_fmodel_flags(const struct btf_type *t)
 {
 	u8 flags = 0;
 
-	if (__btf_type_is_struct(t))
+	if (btf_type_is_struct(t))
 		flags |= BTF_FMODEL_STRUCT_ARG;
 	if (btf_type_is_signed_int(t))
 		flags |= BTF_FMODEL_SIGNED_ARG;
@@ -7384,7 +7373,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 			return -EINVAL;
 		}
 		ret = __get_type_size(btf, func->type, &t);
-		if (ret < 0 || __btf_type_is_struct(t)) {
+		if (ret < 0 || btf_type_is_struct(t)) {
 			bpf_log(log,
 				"The function %s return type %s is unsupported.\n",
 				tname, btf_type_str(t));