Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c                  3
-rw-r--r--  kernel/gcov/fs.c                   3
-rw-r--r--  kernel/gcov/gcc_4_7.c              3
-rw-r--r--  kernel/locking/locktorture.c       3
-rw-r--r--  kernel/padata.c                    3
-rw-r--r--  kernel/params.c                    3
-rw-r--r--  kernel/rcu/rcuscale.c              6
-rw-r--r--  kernel/rcu/rcutorture.c            3
-rw-r--r--  kernel/sched/ext.c                 3
-rw-r--r--  kernel/trace/trace.c               9
-rw-r--r--  kernel/trace/trace_events_hist.c   3
-rw-r--r--  kernel/trace/trace_probe.c         3
-rw-r--r--  kernel/unwind/deferred.c           3
13 files changed, 16 insertions, 32 deletions
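
Every hunk below makes the same mechanical change: the explicit GFP_KERNEL
argument is dropped from the typed allocation helpers, which now imply it.
As a rough sketch only (an assumption for illustration, not the definitions
used in this tree), the helpers could expand along these lines:

    /* Hypothetical expansions, assuming GFP_KERNEL becomes the implied default. */
    #define kzalloc_obj(obj)           kzalloc(sizeof(obj), GFP_KERNEL)
    #define kzalloc_objs(obj, count)   kcalloc(count, sizeof(obj), GFP_KERNEL)
    #define kmalloc_objs(obj, count)   kmalloc_array(count, sizeof(obj), GFP_KERNEL)
    /* struct ending in flexible-array member 'member', sized for 'count' elements */
    #define kzalloc_flex(obj, member, count) \
            kzalloc(struct_size_t(typeof(obj), member, count), GFP_KERNEL)

With GFP_KERNEL implied, each converted call site shrinks from two lines to
one, which is where the 16 insertions and 32 deletions in the diffstat above
come from.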
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 80b3e94f3fe3..3ece2da55625 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -910,8 +910,7 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins
struct bpf_prog_pack *pack;
int err;
- pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT),
- GFP_KERNEL);
+ pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT));
if (!pack)
return NULL;
pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index 33829e4a4d39..1d19b1be207a 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -545,8 +545,7 @@ static struct gcov_node *new_node(struct gcov_node *parent,
if (!node)
goto err_nomem;
if (info) {
- node->loaded_info = kzalloc_objs(struct gcov_info *, 1,
- GFP_KERNEL);
+ node->loaded_info = kzalloc_objs(struct gcov_info *, 1);
if (!node->loaded_info)
goto err_nomem;
}
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 46dbba7b0efd..8fa22ababd94 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -298,8 +298,7 @@ struct gcov_info *gcov_info_dup(struct gcov_info *info)
if (!dup->filename)
goto err_free;
- dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions,
- GFP_KERNEL);
+ dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions);
if (!dup->functions)
goto err_free;
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 96a8647a0074..776a226fc237 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -1293,8 +1293,7 @@ static int __init lock_torture_init(void)
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = false;
- cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress,
- GFP_KERNEL);
+ cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
diff --git a/kernel/padata.c b/kernel/padata.c
index 0af32c78ea69..8657e6e0c224 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1106,8 +1106,7 @@ void __init padata_init(void)
#endif
possible_cpus = num_possible_cpus();
- padata_works = kmalloc_objs(struct padata_work, possible_cpus,
- GFP_KERNEL);
+ padata_works = kmalloc_objs(struct padata_work, possible_cpus);
if (!padata_works)
goto remove_dead_state;
diff --git a/kernel/params.c b/kernel/params.c
index 5d1cd7d0b51a..7188a12dbe86 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -638,8 +638,7 @@ static __init_or_module int add_sysfs_param(struct module_kobject *mk,
return -ENOMEM;
mk->mp->grp.name = "parameters";
/* NULL-terminated attribute array. */
- mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0],
- GFP_KERNEL);
+ mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0]);
/* Caller will cleanup via free_module_param_attrs */
if (!mk->mp->grp.attrs)
return -ENOMEM;
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index 6c1acf9ba69b..16adcbecf875 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -755,8 +755,7 @@ kfree_scale_thread(void *arg)
}
for (i = 0; i < kfree_alloc_num; i++) {
- alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult,
- GFP_KERNEL);
+ alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult);
if (!alloc_ptr)
return -ENOMEM;
@@ -1146,8 +1145,7 @@ rcu_scale_init(void)
schedule_timeout_uninterruptible(1);
writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters);
writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
- writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters,
- GFP_KERNEL);
+ writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters);
writer_done = kzalloc_objs(writer_done[0], nrealwriters);
if (gp_async) {
if (gp_async_max <= 0) {
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 197cea4d1f26..9279bb57586b 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -4592,8 +4592,7 @@ rcu_torture_init(void)
if (WARN_ON(nocbs_toggle < 0))
nocbs_toggle = HZ;
if (nrealnocbers > 0) {
- nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers,
- GFP_KERNEL);
+ nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers);
if (nocb_tasks == NULL) {
TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5a812b510d5d..62b1f3ac5630 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4838,8 +4838,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
if (ret < 0)
goto err_free_ei;
- sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids,
- GFP_KERNEL);
+ sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids);
if (!sch->global_dsqs) {
ret = -ENOMEM;
goto err_free_hash;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b44f5ae8958e..23de3719f495 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3903,8 +3903,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (!iter)
return ERR_PTR(-ENOMEM);
- iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids,
- GFP_KERNEL);
+ iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids);
if (!iter->buffer_iter)
goto release;
@@ -9310,8 +9309,7 @@ static void setup_trace_scratch(struct trace_array *tr,
mod_addr_comp, NULL, NULL);
if (IS_ENABLED(CONFIG_MODULES)) {
- module_delta = kzalloc_flex(*module_delta, delta, nr_entries,
- GFP_KERNEL);
+ module_delta = kzalloc_flex(*module_delta, delta, nr_entries);
if (!module_delta) {
pr_info("module_delta allocation failed. Not able to decode module address.");
goto reset;
@@ -10929,8 +10927,7 @@ void __init ftrace_boot_snapshot(void)
void __init early_trace_init(void)
{
if (tracepoint_printk) {
- tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter,
- GFP_KERNEL);
+ tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter);
if (MEM_FAIL(!tracepoint_print_iter,
"Failed to allocate trace iterator\n"))
tracepoint_printk = 0;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index a45cdd05123b..73ea180cad55 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -5674,8 +5674,7 @@ static int print_entries(struct seq_file *m,
(HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
continue;
if (!stats) {
- stats = kzalloc_objs(*stats, hist_data->n_vals,
- GFP_KERNEL);
+ stats = kzalloc_objs(*stats, hist_data->n_vals);
if (!stats) {
n_entries = -ENOMEM;
goto out;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index b3ce9bb0b971..e0a5dc86c07e 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -842,8 +842,7 @@ static int __store_entry_arg(struct trace_probe *tp, int argnum)
if (!earg)
return -ENOMEM;
earg->size = 2 * tp->nr_args + 1;
- earg->code = kzalloc_objs(struct fetch_insn, earg->size,
- GFP_KERNEL);
+ earg->code = kzalloc_objs(struct fetch_insn, earg->size);
if (!earg->code) {
kfree(earg);
return -ENOMEM;
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 23a7d7ea93d4..5bea47314254 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -120,8 +120,7 @@ int unwind_user_faultable(struct unwind_stacktrace *trace)
return -EINVAL;
if (!info->cache) {
- info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES,
- GFP_KERNEL);
+ info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES);
if (!info->cache)
return -ENOMEM;
}