Diffstat (limited to 'net/core')
-rw-r--r--  net/core/bpf_sk_storage.c  | 16
-rw-r--r--  net/core/dev.h             |  1
-rw-r--r--  net/core/filter.c          | 16
-rw-r--r--  net/core/net_namespace.c   |  2
-rw-r--r--  net/core/scm.c             | 20
5 files changed, 27 insertions(+), 28 deletions(-)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 2e538399757f..850dd736ccd1 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -50,16 +50,14 @@ void bpf_sk_storage_free(struct sock *sk)
{
struct bpf_local_storage *sk_storage;
- migrate_disable();
- rcu_read_lock();
+ rcu_read_lock_dont_migrate();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage)
goto out;
bpf_local_storage_destroy(sk_storage);
out:
- rcu_read_unlock();
- migrate_enable();
+ rcu_read_unlock_migrate();
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
@@ -138,7 +136,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk,
{
struct bpf_local_storage_elem *copy_selem;
- copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, false, GFP_ATOMIC);
+ copy_selem = bpf_selem_alloc(smap, newsk, NULL, false, GFP_ATOMIC);
if (!copy_selem)
return NULL;
@@ -161,8 +159,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
- migrate_disable();
- rcu_read_lock();
+ rcu_read_lock_dont_migrate();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage || hlist_empty(&sk_storage->list))
@@ -199,7 +196,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
} else {
ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
if (ret) {
- bpf_selem_free(copy_selem, smap, true);
+ bpf_selem_free(copy_selem, true);
atomic_sub(smap->elem_size,
&newsk->sk_omem_alloc);
bpf_map_put(map);
@@ -213,8 +210,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
}
out:
- rcu_read_unlock();
- migrate_enable();
+ rcu_read_unlock_migrate();
/* In case of an error, don't free anything explicitly here, the
 * caller is responsible to call bpf_sk_storage_free.
 */
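
Note: for readers unfamiliar with the combined helpers used above, here is a minimal sketch of what rcu_read_lock_dont_migrate() and rcu_read_unlock_migrate() plausibly expand to, reconstructed purely from the call pairs they replace in these hunks (the authoritative definitions live in the RCU headers; treat this as an assumption, not the kernel's implementation):

	/* Sketch only: reconstructed from the pairs removed above. */
	static inline void rcu_read_lock_dont_migrate_sketch(void)
	{
		migrate_disable();	/* keep the task on its current CPU */
		rcu_read_lock();	/* enter the RCU read-side critical section */
	}

	static inline void rcu_read_unlock_migrate_sketch(void)
	{
		rcu_read_unlock();	/* leave the read-side critical section */
		migrate_enable();	/* allow CPU migration again */
	}

The combined form cannot be mis-paired or mis-ordered at call sites, which is the point of the conversion.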
diff --git a/net/core/dev.h b/net/core/dev.h
index 4d872a79bafb..da18536cbd35 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -29,7 +29,6 @@ struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
diff --git a/net/core/filter.c b/net/core/filter.c
index 4124becf8604..616e0520a0bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2458,6 +2458,13 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return -EINVAL;
+ /* BPF test infra's convert___skb_to_skb() can create type-less
+ * GSO packets. gso_features_check() will detect this as a bad
* offload. However, let's not leak them out in the first place.
+ */
+ if (unlikely(skb_is_gso(skb) && !skb_shinfo(skb)->gso_type))
+ return -EBADMSG;
+
dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
if (unlikely(!dev))
return -EINVAL;
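
Note: the new guard rejects skbs that are flagged as GSO while carrying no GSO type. Since skb_is_gso() just tests gso_size, the condition can be read as the following predicate (hypothetical helper name, shown only to spell out the check):

	static inline bool skb_is_typeless_gso_sketch(const struct sk_buff *skb)
	{
		/* skb_is_gso() is true whenever gso_size is non-zero; the BPF
		 * test infrastructure can build such skbs with gso_type still
		 * zero, which gso_features_check() would flag as a bad offload.
		 */
		return skb_is_gso(skb) && !skb_shinfo(skb)->gso_type;
	}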
@@ -6498,9 +6505,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
*/
if (skb_is_gso(skb)) {
ret = BPF_MTU_CHK_RET_SUCCESS;
- if (flags & BPF_MTU_CHK_SEGS &&
- !skb_gso_validate_network_len(skb, mtu))
- ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
+ if (flags & BPF_MTU_CHK_SEGS) {
+ if (!skb_transport_header_was_set(skb))
+ return -EINVAL;
+ if (!skb_gso_validate_network_len(skb, mtu))
+ ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
+ }
}
out:
*mtu_len = mtu;
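
Note: from a BPF program's point of view, the stricter BPF_MTU_CHK_SEGS path now surfaces as an -EINVAL return from the helper when the transport header offset was never set. A hypothetical tc program exercising it (section and program names are illustrative; bpf_check_mtu() is the standard helper):

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	SEC("tc")
	int mtu_segs_check(struct __sk_buff *skb)
	{
		__u32 mtu_len = 0;

		/* Non-zero covers both BPF_MTU_CHK_RET_SEGS_TOOBIG and the
		 * new -EINVAL for skbs without a transport header set.
		 */
		if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, 0, BPF_MTU_CHK_SEGS))
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}

	char _license[] SEC("license") = "GPL";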
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index dfad7c03b809..a6e6a964a287 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -440,7 +440,7 @@ static __net_init int setup_net(struct net *net)
LIST_HEAD(net_exit_list);
int error = 0;
- net->net_cookie = ns_tree_gen_id(&net->ns);
+ net->net_cookie = ns_tree_gen_id(net);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
diff --git a/net/core/scm.c b/net/core/scm.c
index 66eaee783e8b..cd87f66671aa 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -273,17 +273,13 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
check_object_size(data, cmlen - sizeof(*cm), true);
- if (can_do_masked_user_access())
- cm = masked_user_access_begin(cm);
- else if (!user_write_access_begin(cm, cmlen))
- goto efault;
-
- unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
- unsafe_put_user(level, &cm->cmsg_level, efault_end);
- unsafe_put_user(type, &cm->cmsg_type, efault_end);
- unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
- cmlen - sizeof(*cm), efault_end);
- user_write_access_end();
+ scoped_user_write_access_size(cm, cmlen, efault) {
+ unsafe_put_user(cmlen, &cm->cmsg_len, efault);
+ unsafe_put_user(level, &cm->cmsg_level, efault);
+ unsafe_put_user(type, &cm->cmsg_type, efault);
+ unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
+ cmlen - sizeof(*cm), efault);
+ }
} else {
struct cmsghdr *cm = msg->msg_control;
@@ -301,8 +297,6 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
msg->msg_controllen -= cmlen;
return 0;
-efault_end:
- user_write_access_end();
efault:
return -EFAULT;
}
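
Note: the scoped_user_write_access_size() conversion folds the masked/plain access_begin choice, the sized validity check, and the terminating access_end into one block, which is why the separate efault_end unwind label could be dropped. For context, a small hypothetical userspace consumer of what put_cmsg() writes, using the standard CMSG_* macros from <sys/socket.h>:

	#include <stdio.h>
	#include <sys/socket.h>

	static void dump_cmsgs(struct msghdr *msg)
	{
		/* cmsg_len, cmsg_level and cmsg_type are exactly the three
		 * fields the kernel fills with unsafe_put_user() above.
		 */
		for (struct cmsghdr *cm = CMSG_FIRSTHDR(msg); cm;
		     cm = CMSG_NXTHDR(msg, cm))
			printf("cmsg level=%d type=%d len=%zu\n",
			       cm->cmsg_level, cm->cmsg_type,
			       (size_t)cm->cmsg_len);
	}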