Diffstat (limited to 'net/core')
 net/core/bpf_sk_storage.c    |  2
 net/core/dev.c               | 78
 net/core/devmem.c            | 10
 net/core/drop_monitor.c      |  7
 net/core/dst.c               |  5
 net/core/failover.c          |  2
 net/core/filter.c            |  7
 net/core/flow_offload.c      | 12
 net/core/gen_estimator.c     |  2
 net/core/gro_cells.c         |  2
 net/core/neighbour.c         |  2
 net/core/net_namespace.c     |  2
 net/core/netclassid_cgroup.c |  2
 net/core/netpoll.c           |  2
 net/core/netprio_cgroup.c    |  2
 net/core/rtnetlink.c         |  4
 net/core/scm.c               |  2
 net/core/selftests.c         |  2
 net/core/skbuff.c            | 30
 net/core/skmsg.c             |  2
 net/core/sock.c              |  2
 net/core/sock_diag.c         |  2
 net/core/sock_map.c          |  2
 net/core/sock_reuseport.c    |  2
 net/core/xdp.c               |  4
 25 files changed, 108 insertions(+), 81 deletions(-)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 1eb3e060994e..f8338acebf07 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -500,7 +500,7 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
nr_maps++;
}
- diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
+ diag = kzalloc_flex(*diag, maps, nr_maps);
if (!diag)
return ERR_PTR(-ENOMEM);
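Note: most of this diff converts open-coded kmalloc(sizeof(*p)) and kzalloc(struct_size(...)) calls to typed allocation helpers whose definitions live outside net/core. A minimal sketch of the assumed semantics, inferred from the call sites in this diff (the in-tree macros apparently also make the gfp argument optional, defaulting to GFP_KERNEL; that variadic plumbing is omitted here):

    /* Scalar object: allocation size derived from the pointed-to type. */
    #define kmalloc_obj(OBJ, gfp)	kmalloc(sizeof(OBJ), gfp)
    #define kzalloc_obj(OBJ, gfp)	kzalloc(sizeof(OBJ), gfp)

    /* Struct with a trailing flexible array: header plus count trailing
     * members, with the overflow checking of struct_size().
     */
    #define kzalloc_flex(OBJ, member, count, gfp) \
            kzalloc(struct_size(&(OBJ), member, count), gfp)

Under these assumptions, the hunk above is equivalent to the removed kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL).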
diff --git a/net/core/dev.c b/net/core/dev.c
index ac6bcb2a0784..c1a9f7fdcffa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -231,10 +231,13 @@ static bool use_backlog_threads(void)
static inline void backlog_lock_irq_save(struct softnet_data *sd,
unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- else
+ } else {
local_irq_save(*flags);
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock(&sd->input_pkt_queue.lock);
+ }
}
static inline void backlog_lock_irq_disable(struct softnet_data *sd)
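Note: on PREEMPT_RT, spinlocks are sleeping locks and must not be taken with hard interrupts disabled, so the RT path takes the queue lock via spin_lock_irqsave() (which on RT serializes without actually disabling interrupts) rather than nesting it inside local_irq_save(). On non-RT kernels the order is inverted: interrupts off first, then the queue lock only when RPS or backlog threads make cross-CPU access possible. The body of backlog_lock_irq_disable() is not visible in this excerpt; presumably it mirrors the same split. A sketch under that assumption:

    static inline void backlog_lock_irq_disable(struct softnet_data *sd)
    {
            if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
                    spin_lock_irq(&sd->input_pkt_queue.lock);
            } else {
                    local_irq_disable();
                    if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                            spin_lock(&sd->input_pkt_queue.lock);
            }
    }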
@@ -248,9 +251,13 @@ static inline void backlog_lock_irq_disable(struct softnet_data *sd)
static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
unsigned long flags)
{
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock(&sd->input_pkt_queue.lock);
- local_irq_restore(flags);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ spin_unlock_irqrestore(&sd->input_pkt_queue.lock, flags);
+ } else {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock(&sd->input_pkt_queue.lock);
+ local_irq_restore(flags);
+ }
}
static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
@@ -266,7 +273,7 @@ static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
{
struct netdev_name_node *name_node;
- name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
+ name_node = kmalloc_obj(*name_node);
if (!name_node)
return NULL;
INIT_HLIST_NODE(&name_node->hlist);
@@ -737,7 +744,7 @@ static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
int k = stack->num_paths++;
- if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
+ if (k >= NET_DEVICE_PATH_STACK_MAX)
return NULL;
return &stack->path[k];
@@ -4815,6 +4822,8 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
* to -1 or to their cpu id, but not to our id.
*/
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
+ bool is_list = false;
+
if (dev_xmit_recursion())
goto recursion_alert;
@@ -4825,17 +4834,28 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
+ is_list = !!skb->next;
+
dev_xmit_recursion_inc();
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
dev_xmit_recursion_dec();
- if (dev_xmit_complete(rc)) {
- HARD_TX_UNLOCK(dev, txq);
- goto out;
- }
+
+ /* GSO segments a single SKB into a list of frames.
+ * TCP expects an error return to mean that none of
+ * the data was sent.
+ */
+ if (is_list)
+ rc = NETDEV_TX_OK;
}
HARD_TX_UNLOCK(dev, txq);
+ if (!skb) /* xmit completed */
+ goto out;
+
net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
dev->name);
+ /* NETDEV_TX_BUSY or queue was stopped */
+ if (!is_list)
+ rc = -ENETDOWN;
} else {
/* Recursion is detected! It is possible,
* unfortunately
@@ -4843,10 +4863,10 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
recursion_alert:
net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
dev->name);
+ rc = -ENETDOWN;
}
}
- rc = -ENETDOWN;
rcu_read_unlock_bh();
dev_core_stats_tx_dropped_inc(dev);
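Note: the rewritten exit path keys off dev_hard_start_xmit()'s return value: it hands back the unsent remainder of the list, so skb == NULL means everything was transmitted. When a GSO list fails partway, rc is forced to NETDEV_TX_OK so that TCP does not treat a partial send as "nothing was sent"; only single skbs propagate -ENETDOWN. For reference, a sketch (from memory; the include/linux/netdevice.h original may differ) of the dev_xmit_complete() helper the old test used:

    static inline bool dev_xmit_complete(int rc)
    {
            /* The driver consumed the skb in these cases: successful
             * transmission (NETDEV_TX_OK), a hard error (rc < 0), or a
             * soft error from queueing elsewhere (rc & NET_XMIT_MASK).
             * Only NETDEV_TX_BUSY leaves the skb with the caller.
             */
            if (likely(rc < NET_XMIT_MASK))
                    return true;

            return false;
    }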
@@ -4985,8 +5005,7 @@ static bool rps_flow_is_active(struct rps_dev_flow *rflow,
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,
- u32 flow_id)
+ struct rps_dev_flow *rflow, u16 next_cpu, u32 hash)
{
if (next_cpu < nr_cpu_ids) {
u32 head;
@@ -4997,6 +5016,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *tmp_rflow;
unsigned int tmp_cpu;
u16 rxq_index;
+ u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -5012,6 +5032,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!flow_table)
goto out;
+ flow_id = rfs_slot(hash, flow_table);
tmp_rflow = &flow_table->flows[flow_id];
tmp_cpu = READ_ONCE(tmp_rflow->cpu);
@@ -5059,7 +5080,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
- u32 flow_id;
u32 tcpu;
u32 hash;
@@ -5106,8 +5126,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
- flow_id = rfs_slot(hash, flow_table);
- rflow = &flow_table->flows[flow_id];
+ rflow = &flow_table->flows[rfs_slot(hash, flow_table)];
tcpu = rflow->cpu;
/*
@@ -5126,8 +5145,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
- rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash,
- flow_id);
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
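Note: set_rps_cpu() now derives the slot itself rather than taking flow_id from the caller. The flow_id get_rps_cpu() computed came from the mask of the table it looked up, but the steering branch indexes the flow table of a different RX queue (selected via rxq_index), and per-queue tables need not be the same size; the slot must be computed against the table it actually indexes. A sketch of rfs_slot(), assuming it is the usual power-of-two masking helper:

    static inline u32 rfs_slot(u32 hash,
                               const struct rps_dev_flow_table *flow_table)
    {
            /* Flow tables are sized to a power of two, so masking the
             * packet hash picks a slot within this table's bounds.
             */
            return hash & flow_table->mask;
    }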
@@ -6503,8 +6521,7 @@ struct flush_backlogs {
static struct flush_backlogs *flush_backlogs_alloc(void)
{
- return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
- GFP_KERNEL);
+ return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids);
}
static struct flush_backlogs *flush_backlogs_fallback;
@@ -8687,7 +8704,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
return 0;
}
- adj = kmalloc(sizeof(*adj), GFP_KERNEL);
+ adj = kmalloc_obj(*adj);
if (!adj)
return -ENOMEM;
@@ -9127,8 +9144,7 @@ static int netdev_offload_xstats_enable_l3(struct net_device *dev,
int err;
int rc;
- dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
- GFP_KERNEL);
+ dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3);
if (!dev->offload_xstats_l3)
return -ENOMEM;
@@ -10653,7 +10669,7 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
return -EINVAL;
}
- link = kzalloc(sizeof(*link), GFP_USER);
+ link = kzalloc_obj(*link, GFP_USER);
if (!link) {
err = -ENOMEM;
goto unlock;
@@ -11934,7 +11950,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
#ifdef CONFIG_NET_CLS_ACT
if (queue)
return queue;
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ queue = kzalloc_obj(*queue);
if (!queue)
return NULL;
netdev_init_one_queue(dev, queue, NULL);
@@ -12009,8 +12025,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
maxqs = max(txqs, rxqs);
- dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
- GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ dev = kvzalloc_flex(*dev, priv, sizeof_priv,
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!dev)
return NULL;
@@ -12081,11 +12097,11 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
- dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
+ dev->ethtool = kzalloc_obj(*dev->ethtool, GFP_KERNEL_ACCOUNT);
if (!dev->ethtool)
goto free_all;
- dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT);
+ dev->cfg = kzalloc_obj(*dev->cfg, GFP_KERNEL_ACCOUNT);
if (!dev->cfg)
goto free_all;
dev->cfg_pending = dev->cfg;
@@ -12851,7 +12867,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
int i;
struct hlist_head *hash;
- hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
+ hash = kmalloc_objs(*hash, NETDEV_HASHENTRIES);
if (hash != NULL)
for (i = 0; i < NETDEV_HASHENTRIES; i++)
INIT_HLIST_HEAD(&hash[i]);
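Note: dev.c also uses array variants of the helpers. A sketch of their assumed expansion, paralleling kmalloc_array()/kcalloc()/kvmalloc_array() with the same overflow-checked count * sizeof(elem) sizing (the gfp argument again appears to be optional at the call sites, defaulting to GFP_KERNEL):

    #define kmalloc_objs(OBJ, count, gfp)	kmalloc_array(count, sizeof(OBJ), gfp)
    #define kzalloc_objs(OBJ, count, gfp)	kcalloc(count, sizeof(OBJ), gfp)
    #define kvmalloc_objs(OBJ, count, gfp)	kvmalloc_array(count, sizeof(OBJ), gfp)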
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 63f093f7d2b2..8c9aad776bf4 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -241,9 +241,8 @@ net_devmem_bind_dmabuf(struct net_device *dev,
}
if (direction == DMA_TO_DEVICE) {
- binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
- sizeof(struct net_iov *),
- GFP_KERNEL);
+ binding->tx_vec = kvmalloc_objs(struct net_iov *,
+ dmabuf->size / PAGE_SIZE);
if (!binding->tx_vec) {
err = -ENOMEM;
goto err_unmap;
@@ -289,9 +288,8 @@ net_devmem_bind_dmabuf(struct net_device *dev,
goto err_free_chunks;
}
- owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
- sizeof(*owner->area.niovs),
- GFP_KERNEL);
+ owner->area.niovs = kvmalloc_objs(*owner->area.niovs,
+ owner->area.num_niovs);
if (!owner->area.niovs) {
err = -ENOMEM;
goto err_free_chunks;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 60d31c2feed3..f23cea9e1aaf 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -306,8 +306,7 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
struct net_dm_hw_entries *hw_entries;
unsigned long flags;
- hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
- GFP_KERNEL);
+ hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit);
if (!hw_entries) {
/* If the memory allocation failed, we try to perform another
* allocation in 1/10 second. Otherwise, the probe function
@@ -856,7 +855,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
const char *trap_group_name;
const char *trap_name;
- hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
+ hw_metadata = kzalloc_obj(*hw_metadata, GFP_ATOMIC);
if (!hw_metadata)
return NULL;
@@ -1583,7 +1582,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
case NETDEV_REGISTER:
if (WARN_ON_ONCE(rtnl_dereference(dev->dm_private)))
break;
- stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+ stat = kzalloc_obj(*stat);
if (!stat)
break;
diff --git a/net/core/dst.c b/net/core/dst.c
index 1dae26c51ebe..092861133023 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL(dst_release_immediate);
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
- struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ struct dst_metrics *p = kmalloc_obj(*p, GFP_ATOMIC);
if (p) {
struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
@@ -295,8 +295,7 @@ struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
{
struct metadata_dst *md_dst;
- md_dst = kmalloc(struct_size(md_dst, u.tun_info.options, optslen),
- flags);
+ md_dst = kmalloc_flex(*md_dst, u.tun_info.options, optslen, flags);
if (!md_dst)
return NULL;
diff --git a/net/core/failover.c b/net/core/failover.c
index 2a140b3ea669..0eb2e0ec875b 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -247,7 +247,7 @@ struct failover *failover_register(struct net_device *dev,
if (dev->type != ARPHRD_ETHER)
return ERR_PTR(-EINVAL);
- failover = kzalloc(sizeof(*failover), GFP_KERNEL);
+ failover = kzalloc_obj(*failover);
if (!failover)
return ERR_PTR(-ENOMEM);
diff --git a/net/core/filter.c b/net/core/filter.c
index ba019ded773d..0d5d5a17acb2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -600,8 +600,7 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
if (new_prog) {
first_insn = new_prog->insnsi;
- addrs = kcalloc(len, sizeof(*addrs),
- GFP_KERNEL | __GFP_NOWARN);
+ addrs = kzalloc_objs(*addrs, len, GFP_KERNEL | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
}
@@ -1162,7 +1161,7 @@ static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
unsigned int fsize = bpf_classic_proglen(fprog);
struct sock_fprog_kern *fkprog;
- fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
+ fp->orig_prog = kmalloc_obj(*fkprog);
if (!fp->orig_prog)
return -ENOMEM;
@@ -1482,7 +1481,7 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
- fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ fp = kmalloc_obj(*fp);
if (!fp)
return -ENOMEM;
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index bc5169482710..5071d7fe6ce2 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -12,8 +12,7 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
struct flow_rule *rule;
int i;
- rule = kzalloc(struct_size(rule, action.entries, num_actions),
- GFP_KERNEL);
+ rule = kzalloc_flex(*rule, action.entries, num_actions);
if (!rule)
return NULL;
@@ -33,8 +32,7 @@ struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
struct flow_offload_action *fl_action;
int i;
- fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
- GFP_KERNEL);
+ fl_action = kzalloc_flex(*fl_action, action.entries, num_actions);
if (!fl_action)
return NULL;
@@ -264,7 +262,7 @@ struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
{
struct flow_block_cb *block_cb;
- block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
+ block_cb = kzalloc_obj(*block_cb);
if (!block_cb)
return ERR_PTR(-ENOMEM);
@@ -391,7 +389,7 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
{
struct flow_indr_dev *indr_dev;
- indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
+ indr_dev = kmalloc_obj(*indr_dev);
if (!indr_dev)
return NULL;
@@ -571,7 +569,7 @@ static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
if (info)
return -EEXIST;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc_obj(*info);
if (!info)
return -ENOMEM;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index f112156db587..c34e58c6c3e6 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -154,7 +154,7 @@ int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
if (parm->ewma_log == 0 || parm->ewma_log >= 31)
return -EINVAL;
- est = kzalloc(sizeof(*est), GFP_KERNEL);
+ est = kzalloc_obj(*est);
if (!est)
return -ENOBUFS;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index a725d21159a6..1b84385c04bd 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -132,7 +132,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
* because we might be called from cleanup_net(), and we
* definitely do not want to block this critical task.
*/
- defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
+ defer = kmalloc_obj(*defer, GFP_KERNEL | __GFP_NOWARN);
if (likely(defer)) {
defer->ptr = gcells->cells;
call_rcu(&defer->rcu, percpu_free_defer_callback);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e0897eb41c8d..a95cfe77f7f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -562,7 +562,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
struct neigh_hash_table *ret;
int i;
- ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+ ret = kmalloc_obj(*ret, GFP_ATOMIC);
if (!ret)
return NULL;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index aef44e617361..1057d16d5dd2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -492,7 +492,7 @@ static struct net *net_alloc(void)
goto out_free;
#ifdef CONFIG_KEYS
- net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
+ net->key_domain = kzalloc_obj(struct key_tag);
if (!net->key_domain)
goto out_free_2;
refcount_set(&net->key_domain->usage, 1);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index db9a5354f9de..e1e30f0b60cd 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -32,7 +32,7 @@ cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_cls_state *cs;
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ cs = kzalloc_obj(*cs);
if (!cs)
return ERR_PTR(-ENOMEM);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 09f72f10813c..a8558a52884f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -565,7 +565,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
npinfo = rtnl_dereference(ndev->npinfo);
if (!npinfo) {
- npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+ npinfo = kmalloc_obj(*npinfo);
if (!npinfo) {
err = -ENOMEM;
goto out;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 8456dfbe2eb4..76c33ab44761 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -135,7 +135,7 @@ cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_subsys_state *css;
- css = kzalloc(sizeof(*css), GFP_KERNEL);
+ css = kzalloc_obj(*css);
if (!css)
return ERR_PTR(-ENOMEM);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b1ed55141d8a..dad4b1054955 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -414,7 +414,7 @@ static int rtnl_register_internal(struct module *owner,
if (!link)
goto unlock;
} else {
- link = kzalloc(sizeof(*link), GFP_KERNEL);
+ link = kzalloc_obj(*link);
if (!link)
goto unlock;
}
@@ -3969,7 +3969,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
int ops_srcu_index;
int ret;
- tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
+ tbs = kmalloc_obj(*tbs);
if (!tbs)
return -ENOMEM;
diff --git a/net/core/scm.c b/net/core/scm.c
index cd87f66671aa..a29aa8fb8065 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -83,7 +83,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (!fpl)
{
- fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL_ACCOUNT);
+ fpl = kmalloc_obj(struct scm_fp_list, GFP_KERNEL_ACCOUNT);
if (!fpl)
return -ENOMEM;
*fplp = fpl;
diff --git a/net/core/selftests.c b/net/core/selftests.c
index 8b81feb82c4a..0a203d3fb9dc 100644
--- a/net/core/selftests.c
+++ b/net/core/selftests.c
@@ -237,7 +237,7 @@ static int __net_test_loopback(struct net_device *ndev,
struct sk_buff *skb = NULL;
int ret = 0;
- tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ tpriv = kzalloc_obj(*tpriv);
if (!tpriv)
return -ENOMEM;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 699c401a5eae..0e217041958a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5590,15 +5590,28 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
- bool ret;
+ struct socket *sock;
+ struct file *file;
+ bool ret = false;
if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data)))
return true;
- read_lock_bh(&sk->sk_callback_lock);
- ret = sk->sk_socket && sk->sk_socket->file &&
- file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
- read_unlock_bh(&sk->sk_callback_lock);
+ /* The sk pointer remains valid as long as the skb does. The sk_socket
+ * and file pointers may become NULL if the socket is closed. Both
+ * structures (including file->cred) are RCU-freed, which means they
+ * can be accessed within an RCU read section.
+ */
+ rcu_read_lock();
+ sock = READ_ONCE(sk->sk_socket);
+ if (!sock)
+ goto out;
+ file = READ_ONCE(sock->file);
+ if (!file)
+ goto out;
+ ret = file_ns_capable(file, &init_user_ns, CAP_NET_RAW);
+out:
+ rcu_read_unlock();
return ret;
}
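Note: the old code serialized against socket teardown with sk_callback_lock; the new code instead relies on struct socket and struct file being RCU-freed, so a reader inside rcu_read_lock() either observes NULL or an object guaranteed to outlive the read section. The shape of the pattern, as a minimal generic sketch (obj, parent, and use() are hypothetical names):

    rcu_read_lock();
    obj = READ_ONCE(parent->ptr);   /* snapshot; NULL if being torn down */
    if (obj)
            ret = use(obj);         /* safe: freed only after a grace period */
    rcu_read_unlock();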
@@ -7266,10 +7279,15 @@ void skb_attempt_defer_free(struct sk_buff *skb)
{
struct skb_defer_node *sdn;
unsigned long defer_count;
- int cpu = skb->alloc_cpu;
unsigned int defer_max;
bool kick;
+ int cpu;
+
+ /* zero copy notifications should not be delayed. */
+ if (skb_zcopy(skb))
+ goto nodefer;
+ cpu = skb->alloc_cpu;
if (cpu == raw_smp_processor_id() ||
WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
!cpu_online(cpu)) {
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index ddde93dd8bc6..2e26174c9919 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -522,7 +522,7 @@ static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
struct sk_msg *msg;
- msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
+ msg = kzalloc_obj(*msg, gfp | __GFP_NOWARN);
if (unlikely(!msg))
return NULL;
sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
diff --git a/net/core/sock.c b/net/core/sock.c
index 693e6d80f501..5976100a9d55 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1097,7 +1097,7 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
return -EINVAL;
num_tokens = optlen / sizeof(*tokens);
- tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
+ tokens = kvmalloc_objs(*tokens, num_tokens);
if (!tokens)
return -ENOMEM;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 026ce9bd9e5e..c83335c62360 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -177,7 +177,7 @@ void sock_diag_broadcast_destroy(struct sock *sk)
{
/* Note, this function is often called from an interrupt context. */
struct broadcast_sk *bsk =
- kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);
+ kmalloc_obj(struct broadcast_sk, GFP_ATOMIC);
if (!bsk)
return sk_destruct(sk);
bsk->sk = sk;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 5947b38e4f8b..b0e96337a269 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1858,7 +1858,7 @@ int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
goto out;
}
- sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
+ sockmap_link = kzalloc_obj(*sockmap_link, GFP_USER);
if (!sockmap_link) {
ret = -ENOMEM;
goto out;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 4211710393a8..29948cb44b7d 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -175,7 +175,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
struct sock_reuseport *reuse;
- reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
+ reuse = kzalloc_flex(*reuse, socks, max_socks, GFP_ATOMIC);
if (!reuse)
return NULL;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index fee6d080ee85..9890a30584ba 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -214,7 +214,7 @@ static int __mem_id_init_hash_table(void)
if (unlikely(mem_id_init))
return 0;
- rht = kzalloc(sizeof(*rht), GFP_KERNEL);
+ rht = kzalloc_obj(*rht);
if (!rht)
return -ENOMEM;
@@ -297,7 +297,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
return ERR_PTR(ret);
}
- xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
+ xdp_alloc = kzalloc_obj(*xdp_alloc, gfp);
if (!xdp_alloc)
return ERR_PTR(-ENOMEM);