summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorKees Cook <kees@kernel.org>2026-02-20 23:49:23 -0800
committerKees Cook <kees@kernel.org>2026-02-21 01:02:28 -0800
commit69050f8d6d075dc01af7a5f2f550a8067510366f (patch)
treebb265f94d9dfa7876c06a5d9f88673d496a15341 /kernel/sched
parentd39a1d7486d98668dd34aaa6732aad7977c45f5a (diff)
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to avoid
scalar types (which need careful case-by-case checking), and instead
replace kmalloc-family calls that allocate struct or union object
instances:

Single allocations:
	kmalloc(sizeof(TYPE), ...)
are replaced with:
	kmalloc_obj(TYPE, ...)

Array allocations:
	kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with:
	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:
	kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with:
	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/autogroup.c2
-rw-r--r--kernel/sched/core_sched.c2
-rw-r--r--kernel/sched/cpuacct.c2
-rw-r--r--kernel/sched/cpudeadline.c4
-rw-r--r--kernel/sched/cpufreq_schedutil.c4
-rw-r--r--kernel/sched/cpupri.c2
-rw-r--r--kernel/sched/ext.c10
-rw-r--r--kernel/sched/ext_idle.c4
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/sched/psi.c4
-rw-r--r--kernel/sched/rt.c4
-rw-r--r--kernel/sched/topology.c8
12 files changed, 25 insertions, 27 deletions
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 954137775f38..c5a1019cbe83 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -86,7 +86,7 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
static inline struct autogroup *autogroup_create(void)
{
- struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ struct autogroup *ag = kzalloc_obj(*ag, GFP_KERNEL);
struct task_group *tg;
if (!ag)
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 9ede71ecba7f..6065cf725eee 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -12,7 +12,7 @@ struct sched_core_cookie {
static unsigned long sched_core_alloc_cookie(void)
{
- struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
+ struct sched_core_cookie *ck = kmalloc_obj(*ck, GFP_KERNEL);
if (!ck)
return 0;
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 23a56ba12d81..6e9a2e067886 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -61,7 +61,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
if (!parent_css)
return &root_cpuacct.css;
- ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ ca = kzalloc_obj(*ca, GFP_KERNEL);
if (!ca)
goto out;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 37b572cc8aca..bbb2d68df86a 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -252,9 +252,7 @@ int cpudl_init(struct cpudl *cp)
raw_spin_lock_init(&cp->lock);
cp->size = 0;
- cp->elements = kcalloc(nr_cpu_ids,
- sizeof(struct cpudl_item),
- GFP_KERNEL);
+ cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids, GFP_KERNEL);
if (!cp->elements)
return -ENOMEM;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cfc40181f66e..d71d09ed1b3b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -638,7 +638,7 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
- sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
+ sg_policy = kzalloc_obj(*sg_policy, GFP_KERNEL);
if (!sg_policy)
return NULL;
@@ -722,7 +722,7 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
{
struct sugov_tunables *tunables;
- tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ tunables = kzalloc_obj(*tunables, GFP_KERNEL);
if (tunables) {
gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
if (!have_governor_per_policy())
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 76a9ac5eb794..c2642deeaabc 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -288,7 +288,7 @@ int cpupri_init(struct cpupri *cp)
goto cleanup;
}
- cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+ cp->cpu_to_pri = kzalloc_objs(int, nr_cpu_ids, GFP_KERNEL);
if (!cp->cpu_to_pri)
goto cleanup;
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index c18e81e8ef51..b9fadb2583ea 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4223,11 +4223,11 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
{
struct scx_exit_info *ei;
- ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ ei = kzalloc_obj(*ei, GFP_KERNEL);
if (!ei)
return NULL;
- ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
+ ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN, GFP_KERNEL);
ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
@@ -4824,7 +4824,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
struct scx_sched *sch;
int node, ret;
- sch = kzalloc(sizeof(*sch), GFP_KERNEL);
+ sch = kzalloc_obj(*sch, GFP_KERNEL);
if (!sch)
return ERR_PTR(-ENOMEM);
@@ -4838,8 +4838,8 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
if (ret < 0)
goto err_free_ei;
- sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
- GFP_KERNEL);
+ sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids,
+ GFP_KERNEL);
if (!sch->global_dsqs) {
ret = -ENOMEM;
goto err_free_hash;
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 3d9d404d5cd2..cd630772e164 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -664,8 +664,8 @@ void scx_idle_init_masks(void)
BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));
/* Allocate per-node idle cpumasks */
- scx_idle_node_masks = kcalloc(num_possible_nodes(),
- sizeof(*scx_idle_node_masks), GFP_KERNEL);
+ scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks,
+ num_possible_nodes(), GFP_KERNEL);
BUG_ON(!scx_idle_node_masks);
for_each_node(i) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1e22b7fadd70..f6f050f2faec 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3427,7 +3427,7 @@ retry_pids:
if (!vma->numab_state) {
struct vma_numab_state *ptr;
- ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ ptr = kzalloc_obj(*ptr, GFP_KERNEL);
if (!ptr)
continue;
@@ -13622,10 +13622,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
struct cfs_rq *cfs_rq;
int i;
- tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
+ tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
+ tg->se = kzalloc_objs(se, nr_cpu_ids, GFP_KERNEL);
if (!tg->se)
goto err;
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 59fdb7ebbf22..bf8a70598a09 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1114,7 +1114,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
if (!static_branch_likely(&psi_cgroups_enabled))
return 0;
- cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
+ cgroup->psi = kzalloc_obj(struct psi_group, GFP_KERNEL);
if (!cgroup->psi)
return -ENOMEM;
@@ -1340,7 +1340,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
if (threshold_us == 0 || threshold_us > window_us)
return ERR_PTR(-EINVAL);
- t = kmalloc(sizeof(*t), GFP_KERNEL);
+ t = kmalloc_obj(*t, GFP_KERNEL);
if (!t)
return ERR_PTR(-ENOMEM);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a7680477fa6f..e72df7045592 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -259,10 +259,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
if (!rt_group_sched_enabled())
return 1;
- tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
+ tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
- tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
+ tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ac268da91778..ac54fcae5de7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -350,7 +350,7 @@ static struct perf_domain *pd_init(int cpu)
return NULL;
}
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ pd = kzalloc_obj(*pd, GFP_KERNEL);
if (!pd)
return NULL;
pd->em_pd = obj;
@@ -589,7 +589,7 @@ static struct root_domain *alloc_rootdomain(void)
{
struct root_domain *rd;
- rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ rd = kzalloc_obj(*rd, GFP_KERNEL);
if (!rd)
return NULL;
@@ -1998,7 +1998,7 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
*/
nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
- distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
+ distances = kzalloc_objs(int, nr_levels, GFP_KERNEL);
if (!distances)
return -ENOMEM;
@@ -2734,7 +2734,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
int i;
cpumask_var_t *doms;
- doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
+ doms = kmalloc_objs(*doms, ndoms, GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {