Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--  kernel/bpf/hashtab.c  67
1 file changed, 28 insertions(+), 39 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index c2fcd0cd51e5..c8a9b27f8663 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -215,19 +215,6 @@ static bool htab_has_extra_elems(struct bpf_htab *htab)
return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
}
-static void htab_free_internal_structs(struct bpf_htab *htab, struct htab_elem *elem)
-{
- if (btf_record_has_field(htab->map.record, BPF_TIMER))
- bpf_obj_free_timer(htab->map.record,
- htab_elem_value(elem, htab->map.key_size));
- if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
- bpf_obj_free_workqueue(htab->map.record,
- htab_elem_value(elem, htab->map.key_size));
- if (btf_record_has_field(htab->map.record, BPF_TASK_WORK))
- bpf_obj_free_task_work(htab->map.record,
- htab_elem_value(elem, htab->map.key_size));
-}
-
static void htab_free_prealloced_internal_structs(struct bpf_htab *htab)
{
u32 num_entries = htab->map.max_entries;
@@ -240,7 +227,8 @@ static void htab_free_prealloced_internal_structs(struct bpf_htab *htab)
struct htab_elem *elem;
elem = get_htab_elem(htab, i);
- htab_free_internal_structs(htab, elem);
+ bpf_map_free_internal_structs(&htab->map,
+ htab_elem_value(elem, htab->map.key_size));
cond_resched();
}
}
@@ -669,8 +657,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
struct htab_elem *l;
u32 hash, key_size;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -947,15 +934,21 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
void *value, bool onallcpus)
{
+ void *ptr;
+
if (!onallcpus) {
/* copy true value_size bytes */
- copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
+ ptr = this_cpu_ptr(pptr);
+ copy_map_value(&htab->map, ptr, value);
+ bpf_obj_free_fields(htab->map.record, ptr);
} else {
u32 size = round_up(htab->map.value_size, 8);
int off = 0, cpu;
for_each_possible_cpu(cpu) {
- copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
+ ptr = per_cpu_ptr(pptr, cpu);
+ copy_map_value_long(&htab->map, ptr, value + off);
+ bpf_obj_free_fields(htab->map.record, ptr);
off += size;
}
}
@@ -1098,8 +1091,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1206,8 +1198,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1275,8 +1266,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1338,8 +1328,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1416,8 +1405,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1452,8 +1440,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret;
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
- !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
key_size = map->key_size;
@@ -1509,8 +1496,9 @@ static void htab_free_malloced_internal_structs(struct bpf_htab *htab)
struct htab_elem *l;
hlist_nulls_for_each_entry(l, n, head, hash_node) {
- /* We only free timer on uref dropping to zero */
- htab_free_internal_structs(htab, l);
+ /* We only free internal structs on uref dropping to zero */
+ bpf_map_free_internal_structs(&htab->map,
+ htab_elem_value(l, htab->map.key_size));
}
cond_resched_rcu();
}
@@ -1521,13 +1509,14 @@ static void htab_map_free_internal_structs(struct bpf_map *map)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- /* We only free timer and workqueue on uref dropping to zero */
- if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK)) {
- if (!htab_is_prealloc(htab))
- htab_free_malloced_internal_structs(htab);
- else
- htab_free_prealloced_internal_structs(htab);
- }
+ /* We only free internal structs on uref dropping to zero */
+ if (!bpf_map_has_internal_structs(map))
+ return;
+
+ if (htab_is_prealloc(htab))
+ htab_free_prealloced_internal_structs(htab);
+ else
+ htab_free_malloced_internal_structs(htab);
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */