author		Kees Cook <kees@kernel.org>	2026-02-20 23:49:23 -0800
committer	Kees Cook <kees@kernel.org>	2026-02-21 01:02:28 -0800
commit		69050f8d6d075dc01af7a5f2f550a8067510366f (patch)
tree		bb265f94d9dfa7876c06a5d9f88673d496a15341 /kernel/async.c
parent		d39a1d7486d98668dd34aaa6732aad7977c45f5a (diff)
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replaces kmalloc-family calls that allocate struct or union
object instances:

Single allocations:

	kmalloc(sizeof(TYPE), ...)

are replaced with:

	kmalloc_obj(TYPE, ...)

Array allocations:

	kmalloc_array(COUNT, sizeof(TYPE), ...)

are replaced with:

	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:

	kmalloc(struct_size(PTR, FAM, COUNT), ...)

are replaced with:

	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
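For illustration, a minimal before/after sketch of these rewrites at a
call site. The struct names and fields below are hypothetical, and the
leading macro definition is only a plausible typeof()-based
simplification consistent with the description above, not the actual
kernel implementation:

	/*
	 * Plausible sketch of the core idea (NOT the real definition):
	 * typeof() makes the return type "TYPE *" instead of "void *",
	 * and works for both the TYPE and *VAR spellings of the argument.
	 */
	#define kmalloc_obj(TYPE, gfp) \
		((typeof(TYPE) *)kmalloc(sizeof(TYPE), gfp))

	/* Hypothetical example types */
	struct foo { int a; };
	struct bar { int n; int elems[]; };	/* flexible array member */

	struct foo *one, *many;
	struct bar *flex;

	/* Before: size expressions, "void *" returns */
	one  = kmalloc(sizeof(struct foo), GFP_KERNEL);
	many = kmalloc_array(count, sizeof(struct foo), GFP_KERNEL);
	flex = kmalloc(struct_size(flex, elems, count), GFP_KERNEL);

	/* After: type-first helpers returning "struct foo *" etc. */
	one  = kmalloc_obj(struct foo, GFP_KERNEL);	/* TYPE form */
	many = kmalloc_objs(*many, count, GFP_KERNEL);	/* *VAR form */
	flex = kmalloc_flex(*flex, elems, count, GFP_KERNEL);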
Diffstat (limited to 'kernel/async.c')
-rw-r--r--	kernel/async.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 4c3e6a44595f..862532ad328a 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -205,7 +205,7 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
 	async_cookie_t newcookie;
 
 	/* allow irq-off callers */
-	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
+	entry = kzalloc_obj(struct async_entry, GFP_ATOMIC);
 
 	/*
 	 * If we're out of memory or if there's too much work
@@ -261,7 +261,7 @@ bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
 {
 	struct async_entry *entry;
 
-	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+	entry = kzalloc_obj(struct async_entry, GFP_KERNEL);
 
 	/* Give up if there is no memory or too much work. */
 	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
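A note on the "no longer return void *" point above: with the typed
helper, assigning the allocation to a pointer of the wrong type is now
diagnosed by the compiler (historically a warning, an error under newer
GCC/Clang defaults), whereas the implicit "void *" conversion never
caught it. A hypothetical mismatch for illustration:

	/* Compiles silently: "void *" converts to any object pointer. */
	struct device *d1 = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/*
	 * Diagnosed: kzalloc_obj() returns "struct async_entry *",
	 * which is incompatible with "struct device *".
	 */
	struct device *d2 = kzalloc_obj(struct async_entry, GFP_KERNEL);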