Diffstat (limited to 'net')
 net/core/filter.c       |  3 ++-
 net/core/skmsg.c        |  2 +-
 net/core/sock_map.c     |  1 +
 net/ipv4/tcp_bpf.c      |  4 ++--
 net/xdp/xsk_buff_pool.c | 10 +++++++++-
 net/xdp/xsk_queue.h     |  5 -----
 6 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 8569cd2482ee..0f4d9f3b206e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -12063,7 +12063,7 @@ int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
 }
 
 BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
-BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
 BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
 
 BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
@@ -12112,6 +12112,7 @@ static int __init bpf_kfunc_init(void)
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 					       &bpf_kfunc_set_sock_addr);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index bbf40b999713..b1dcbd3be89e 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -293,7 +293,7 @@ out:
 	/* If we trim data a full sg elem before curr pointer update
 	 * copybreak and current so that any future copy operations
 	 * start at new copy location.
-	 * However trimed data that has not yet been used in a copy op
+	 * However trimmed data that has not yet been used in a copy op
 	 * does not require an update.
 	 */
 	if (!msg->sg.size) {
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d3dbb92153f2..724b6856fcc3 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1183,6 +1183,7 @@ static void sock_hash_free(struct bpf_map *map)
 			sock_put(elem->sk);
 			sock_hash_free_elem(htab, elem);
 		}
+		cond_resched();
 	}
 
 	/* wait for psock readers accessing its map link */
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index fe6178715ba0..e7658c5d6b79 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -30,7 +30,7 @@ void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
-			   struct sk_msg *msg, u32 apply_bytes, int flags)
+			   struct sk_msg *msg, u32 apply_bytes)
 {
 	bool apply = apply_bytes;
 	struct scatterlist *sge;
@@ -167,7 +167,7 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
 	if (unlikely(!psock))
 		return -EPIPE;
 
-	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
+	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes) :
 			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
 	sk_psock_put(sk, psock);
 	return ret;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 6b2756f95629..56edb98e5b47 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -661,9 +661,17 @@ EXPORT_SYMBOL(xp_alloc_batch);
 
 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
 {
+	u32 req_count, avail_count;
+
 	if (pool->free_list_cnt >= count)
 		return true;
-	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
+
+	req_count = count - pool->free_list_cnt;
+	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
+	if (!avail_count)
+		pool->fq->queue_empty_descs++;
+
+	return avail_count >= req_count;
 }
 EXPORT_SYMBOL(xp_can_alloc);
 
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 6f2d1621c992..406b20dfee8d 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -306,11 +306,6 @@ static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
 	return entries >= max ? max : entries;
 }
 
-static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-{
-	return xskq_cons_nb_entries(q, cnt) >= cnt;
-}
-
 static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
 {
 	if (q->cached_prod == q->cached_cons)
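
For context (not part of the commit): the filter.c hunks register the skb kfunc set for BPF_PROG_TYPE_TRACING and mark bpf_dynptr_from_skb with KF_TRUSTED_ARGS, which lets a tracing program build a read-only dynptr over a trusted skb argument. Below is a minimal sketch of what this enables; the attach point (tp_btf/kfree_skb), the extern declaration, and the header names follow common libbpf conventions and are illustrative assumptions, not taken from this diff.

/* Illustrative sketch only -- not part of this commit. Assumes a
 * libbpf-style build with a generated vmlinux.h.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>

/* The kfunc made callable from tracing programs by the hunks above. */
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;

SEC("tp_btf/kfree_skb")
int BPF_PROG(on_kfree_skb, struct sk_buff *skb, void *location)
{
	struct bpf_dynptr ptr;
	struct ethhdr eth;

	/* In tp_btf context, skb is a trusted pointer, so the call
	 * satisfies the new KF_TRUSTED_ARGS requirement.
	 */
	if (bpf_dynptr_from_skb((struct __sk_buff *)skb, 0, &ptr))
		return 0;

	/* Read the Ethernet header through the read-only dynptr. */
	if (bpf_dynptr_read(&eth, sizeof(eth), &ptr, 0, 0))
		return 0;

	bpf_printk("freed skb, proto 0x%x", bpf_ntohs(eth.h_proto));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";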
