summary refs log tree commit diff
path: root/mm
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-21 20:03:00 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-21 20:03:00 -0800
commit32a92f8c89326985e05dce8b22d3f0aa07a3e1bd (patch)
tree65f84985b9ed2d5cf3c5243aca78d9428e25c312 /mm
parent323bbfcf1ef8836d0d2ad9e2c1f1c684f0e3b5b3 (diff)
Convert more 'alloc_obj' cases to default GFP_KERNEL arguments (HEAD, torvalds/master, torvalds/HEAD, master)
This converts some of the visually simpler cases that have been split over multiple lines. I only did the ones that are easy to verify the resulting diff by having just that final GFP_KERNEL argument on the next line. Somebody should probably do a proper coccinelle script for this, but for me the trivial script actually resulted in an assertion failure in the middle of the script. I probably had made it a bit _too_ trivial. So after fighting that far a while I decided to just do some of the syntactically simpler cases with variations of the previous 'sed' scripts. The more syntactically complex multi-line cases would mostly really want whitespace cleanup anyway. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/damon/sysfs-schemes.c18
-rw-r--r--mm/damon/sysfs.c3
-rw-r--r--mm/damon/tests/core-kunit.h6
-rw-r--r--mm/damon/vaddr.c3
-rw-r--r--mm/ksm.c3
-rw-r--r--mm/memory-tiers.c3
-rw-r--r--mm/mempolicy.c9
-rw-r--r--mm/mmu_notifier.c3
-rw-r--r--mm/swapfile.c3
9 files changed, 17 insertions, 34 deletions
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 5a12d1103d33..3a0782e576fa 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -26,8 +26,7 @@ struct damon_sysfs_scheme_region {
static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
struct damon_region *region)
{
- struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region,
- GFP_KERNEL);
+ struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region);
if (!sysfs_region)
return NULL;
@@ -138,8 +137,7 @@ struct damon_sysfs_scheme_regions {
static struct damon_sysfs_scheme_regions *
damon_sysfs_scheme_regions_alloc(void)
{
- struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions,
- GFP_KERNEL);
+ struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions);
if (!regions)
return NULL;
@@ -851,8 +849,7 @@ static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
enum damos_wmark_metric metric, unsigned long interval_us,
unsigned long high, unsigned long mid, unsigned long low)
{
- struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks,
- GFP_KERNEL);
+ struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks);
if (!watermarks)
return NULL;
@@ -1659,8 +1656,7 @@ struct damon_sysfs_access_pattern {
static
struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
{
- struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern,
- GFP_KERNEL);
+ struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern);
if (!access_pattern)
return NULL;
@@ -2681,12 +2677,10 @@ static int damos_sysfs_add_migrate_dest(struct damos *scheme,
struct damos_migrate_dests *dests = &scheme->migrate_dests;
int i;
- dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr,
- GFP_KERNEL);
+ dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr);
if (!dests->node_id_arr)
return -ENOMEM;
- dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr,
- GFP_KERNEL);
+ dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr);
if (!dests->weight_arr)
/* ->node_id_arr will be freed by scheme destruction */
return -ENOMEM;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index f2e7551eea3f..576d1ddd736b 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -609,8 +609,7 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
unsigned long sample_us, unsigned long aggr_us,
unsigned long update_us)
{
- struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals,
- GFP_KERNEL);
+ struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals);
if (!intervals)
return NULL;
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index d3a30b170564..596f33ec2d81 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -725,12 +725,10 @@ static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
{
size_t i;
- dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests,
- GFP_KERNEL);
+ dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests);
if (!dests->node_id_arr)
return -ENOMEM;
- dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests,
- GFP_KERNEL);
+ dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests);
if (!dests->weight_arr) {
kfree(dests->node_id_arr);
dests->node_id_arr = NULL;
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 862835d69af1..729b7ffd3565 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -821,8 +821,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
use_target_nid = dests->nr_dests == 0;
nr_dests = use_target_nid ? 1 : dests->nr_dests;
priv.scheme = s;
- priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests,
- GFP_KERNEL);
+ priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests);
if (!priv.migration_lists)
return 0;
diff --git a/mm/ksm.c b/mm/ksm.c
index 85481c3e3f2c..bda71ae609ff 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3586,8 +3586,7 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj,
* Allocate stable and unstable together:
* MAXSMP NODES_SHIFT 10 will use 16kB.
*/
- buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids,
- GFP_KERNEL);
+ buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids);
/* Let us assume that RB_ROOT is NULL is zero */
if (!buf)
err = -ENOMEM;
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 631df0614331..986f809376eb 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -912,8 +912,7 @@ static int __init memory_tier_init(void)
panic("%s() failed to register memory tier subsystem\n", __func__);
#ifdef CONFIG_MIGRATION
- node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids,
- GFP_KERNEL);
+ node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids);
WARN_ON(!node_demotion);
#endif
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4a33eb2a1f7f..0e5175f1c767 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -229,8 +229,7 @@ int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords)
if (!new_bw)
return -ENOMEM;
- new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state) {
kfree(new_bw);
return -ENOMEM;
@@ -3642,8 +3641,7 @@ static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
kstrtou8(buf, 0, &weight) || weight == 0)
return -EINVAL;
- new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
@@ -3695,8 +3693,7 @@ static ssize_t weighted_interleave_auto_store(struct kobject *kobj,
if (kstrtobool(buf, &input))
return -EINVAL;
- new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
for (i = 0; i < nr_node_ids; i++)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 1aa561a055eb..a6cdf3674bdc 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -618,8 +618,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
* know that mm->notifier_subscriptions can't change while we
* hold the write side of the mmap_lock.
*/
- subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions,
- GFP_KERNEL);
+ subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions);
if (!subscriptions)
return -ENOMEM;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a94aa1054fd8..94af29d1de88 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3265,8 +3265,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
spin_lock_init(&cluster_info[i].lock);
if (!(si->flags & SWP_SOLIDSTATE)) {
- si->global_cluster = kmalloc_obj(*si->global_cluster,
- GFP_KERNEL);
+ si->global_cluster = kmalloc_obj(*si->global_cluster);
if (!si->global_cluster)
goto err;
for (i = 0; i < SWAP_NR_ORDERS; i++)