Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cpuset.c        21
-rw-r--r--  kernel/events/callchain.c      2
-rw-r--r--  kernel/events/core.c           8
-rw-r--r--  kernel/events/ring_buffer.c    2
-rw-r--r--  kernel/irq/debugfs.c           2
-rw-r--r--  kernel/irq/matrix.c            2
-rw-r--r--  kernel/kexec_core.c           16
-rw-r--r--  kernel/power/suspend.c         9
-rw-r--r--  kernel/power/swap.c           14
-rw-r--r--  kernel/sched/core.c            5
-rw-r--r--  kernel/sched/ext.c            23
-rw-r--r--  kernel/sched/fair.c            2
-rw-r--r--  kernel/sched/pelt.c            2
-rw-r--r--  kernel/time/clockevents.c      2
-rw-r--r--  kernel/time/hrtimer.c          2
-rw-r--r--  kernel/time/tick-broadcast.c   2
-rw-r--r--  kernel/time/tick-common.c      2
-rw-r--r--  kernel/time/tick-oneshot.c     2
-rw-r--r--  kernel/time/tick-sched.c       2
-rw-r--r--  kernel/trace/ring_buffer.c     2
-rw-r--r--  kernel/trace/trace.c           8
-rw-r--r--  kernel/trace/trace_events.c    7
22 files changed, 88 insertions, 49 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6e6eb09b8db6..3e8cc34d8d50 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1668,7 +1668,14 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 {
 	WARN_ON_ONCE(!is_remote_partition(cs));
-	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
+	/*
+	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
+	 * which should clear subpartitions_cpus. We should not emit a warning for this
+	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
+	 * may already be cleared when disabling the partition.
+	 */
+	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
+		     !cpumask_empty(subpartitions_cpus));
 
 	spin_lock_irq(&callback_lock);
 	cs->remote_partition = false;
@@ -3976,8 +3983,9 @@ retry:
 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
 		compute_partition_effective_cpumask(cs, &new_cpus);
 
-	if (remote && cpumask_empty(&new_cpus) &&
-	    partition_is_populated(cs, NULL)) {
+	if (remote && (cpumask_empty(subpartitions_cpus) ||
+		       (cpumask_empty(&new_cpus) &&
+			partition_is_populated(cs, NULL)))) {
 		cs->prs_err = PERR_HOTPLUG;
 		remote_partition_disable(cs, tmp);
 		compute_effective_cpumask(&new_cpus, cs, parent);
@@ -3990,9 +3998,12 @@ retry:
 	 * 1) empty effective cpus but not valid empty partition.
 	 * 2) parent is invalid or doesn't grant any cpus to child
 	 *    partitions.
+	 * 3) subpartitions_cpus is empty.
 	 */
-	if (is_local_partition(cs) && (!is_partition_valid(parent) ||
-				tasks_nocpu_error(parent, cs, &new_cpus)))
+	if (is_local_partition(cs) &&
+	    (!is_partition_valid(parent) ||
+	     tasks_nocpu_error(parent, cs, &new_cpus) ||
+	     cpumask_empty(subpartitions_cpus)))
 		partcmd = partcmd_invalidate;
 	/*
 	 * On the other hand, an invalid partition root may be transitioned
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9c7e00725d6..1f6589578703 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -2,7 +2,7 @@
 /*
  * Performance events callchain code, extracted from core.c:
  *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
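Note on the cpuset hunks: cpumask_subset(x, empty) is false for any non-empty x, so once hotplug clears subpartitions_cpus top-down, the old WARN_ON_ONCE() fired even though nothing was wrong. A minimal sketch of the relaxed invariant check — a hypothetical helper built only from names in the diff, not code from the patch itself:

    #include <linux/cpumask.h>

    /* True only when effective_xcpus escapes subpartitions_cpus AND
     * subpartitions_cpus still has CPUs, i.e. the subset violation is
     * not just an artifact of top-down hotplug clearing. */
    static inline bool xcpus_subset_broken(const struct cpumask *effective_xcpus,
                                           const struct cpumask *subpartitions_cpus)
    {
            return !cpumask_subset(effective_xcpus, subpartitions_cpus) &&
                   !cpumask_empty(subpartitions_cpus);
    }
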
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dad0d3d2e85f..5b5cb620499e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2,7 +2,7 @@
 /*
  * Performance events core code:
  *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
@@ -11906,6 +11906,11 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 	}
 }
 
+static void perf_swevent_destroy_hrtimer(struct perf_event *event)
+{
+	hrtimer_cancel(&event->hw.hrtimer);
+}
+
 static void perf_swevent_init_hrtimer(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -11914,6 +11919,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 		return;
 
 	hrtimer_setup(&hwc->hrtimer, perf_swevent_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+	event->destroy = perf_swevent_destroy_hrtimer;
 
 	/*
 	 * Since hrtimers have a fixed rate, we can do a static freq->period
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 20a905023736..3e7de2661417 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -2,7 +2,7 @@
 /*
  * Performance events ring-buffer code:
  *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 3527defd2890..5c5ebaee35f2 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright 2017 Thomas Gleixner <tglx@linutronix.de>
+// Copyright 2017 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
 
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 8f222d1cccec..a50f2305a8dc 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
+// Copyright (C) 2017 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
 
 #include <linux/spinlock.h>
 #include <linux/seq_file.h>
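Note on the perf change: perf_swevent_init_hrtimer() now registers a destroy callback so the timer is cancelled before the event is freed. hrtimer_cancel() blocks until a concurrently running handler has completed, which is what closes the use-after-free window. Condensed from the hunk above:

    /* The ->destroy() hook runs at event teardown. hrtimer_cancel() waits
     * for an in-flight handler to finish, so perf_swevent_hrtimer() can
     * never fire into freed perf_event memory. */
    static void perf_swevent_destroy_hrtimer(struct perf_event *event)
    {
            hrtimer_cancel(&event->hw.hrtimer);
    }
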
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 0f92acdd354d..95c585c6ddc3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -953,17 +953,24 @@ int kimage_load_segment(struct kimage *image, int idx)
 	return result;
 }
 
-void *kimage_map_segment(struct kimage *image,
-			 unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
 {
+	unsigned long addr, size, eaddr;
 	unsigned long src_page_addr, dest_page_addr = 0;
-	unsigned long eaddr = addr + size;
 	kimage_entry_t *ptr, entry;
 	struct page **src_pages;
 	unsigned int npages;
+	struct page *cma;
 	void *vaddr = NULL;
 	int i;
 
+	cma = image->segment_cma[idx];
+	if (cma)
+		return page_address(cma);
+
+	addr = image->segment[idx].mem;
+	size = image->segment[idx].memsz;
+	eaddr = addr + size;
+
 	/*
 	 * Collect the source pages and map them in a contiguous VA range.
 	 */
@@ -1004,7 +1011,8 @@ void *kimage_map_segment(struct kimage *image,
 
 void kimage_unmap_segment(void *segment_buffer)
 {
-	vunmap(segment_buffer);
+	if (is_vmalloc_addr(segment_buffer))
+		vunmap(segment_buffer);
 }
 
 struct kexec_load_limit {
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2da4482bb6eb..57c44268698f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -349,9 +349,12 @@ static int suspend_test(int level)
 	if (pm_test_level == level) {
 		pr_info("suspend debug: Waiting for %d second(s).\n",
 			pm_test_delay);
-		for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++)
-			msleep(1000);
-
+		for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++) {
+			if (level > TEST_CORE)
+				msleep(1000);
+			else
+				mdelay(1000);
+		}
 		return 1;
 	}
 #endif /* !CONFIG_PM_DEBUG */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 33a186373bef..8050e5182835 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -902,8 +902,11 @@ out_clean:
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		acomp_request_free(data[thr].cr);
-		crypto_free_acomp(data[thr].cc);
+		if (data[thr].cr)
+			acomp_request_free(data[thr].cr);
+
+		if (!IS_ERR_OR_NULL(data[thr].cc))
+			crypto_free_acomp(data[thr].cc);
 	}
 	vfree(data);
 }
@@ -1499,8 +1502,11 @@ out_clean:
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		acomp_request_free(data[thr].cr);
-		crypto_free_acomp(data[thr].cc);
+		if (data[thr].cr)
+			acomp_request_free(data[thr].cr);
+
+		if (!IS_ERR_OR_NULL(data[thr].cc))
+			crypto_free_acomp(data[thr].cc);
 	}
 	vfree(data);
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 41ba0be16911..60afadb6eede 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10694,10 +10694,11 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	sched_mm_cid_exit(t);
 }
 
-/* Reactivate MM CID after successful execve() */
+/* Reactivate MM CID after execve() */
 void sched_mm_cid_after_execve(struct task_struct *t)
 {
-	sched_mm_cid_fork(t);
+	if (t->mm)
+		sched_mm_cid_fork(t);
 }
 
 static void mm_cid_work_fn(struct work_struct *work)
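Note on the two identical swap.c hunks: they fix the error-unwind path. crypto_alloc_acomp() reports failure with an ERR_PTR() while acomp_request_alloc() returns NULL, so each pointer needs the matching guard before being released — otherwise a failed allocation feeds garbage into the free routines. The pattern, extracted from the hunks above:

    for (thr = 0; thr < nr_threads; thr++) {
            if (data[thr].thr)
                    kthread_stop(data[thr].thr);
            if (data[thr].cr)                          /* request: NULL on failure */
                    acomp_request_free(data[thr].cr);
            if (!IS_ERR_OR_NULL(data[thr].cc))         /* tfm: ERR_PTR() on failure */
                    crypto_free_acomp(data[thr].cc);
    }
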
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 94164f2dec6d..8f6d8d7f895c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	 *
 	 * @p may go through multiple stopping <-> running transitions between
 	 * here and put_prev_task_scx() if task attribute changes occur while
-	 * balance_scx() leaves @rq unlocked. However, they don't contain any
+	 * balance_one() leaves @rq unlocked. However, they don't contain any
 	 * information meaningful to the BPF scheduler and can be suppressed by
 	 * skipping the callbacks if the task is !QUEUED.
 	 */
@@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	 * preempted, and it regaining control of the CPU.
 	 *
 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
-	 * next time that balance_scx() is invoked.
+	 * next time that balance_one() is invoked.
 	 */
 	if (!rq->scx.cpu_released) {
 		if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
 	}
 
 	/*
-	 * If balance_scx() is telling us to keep running @prev, replenish slice
+	 * If balance_one() is telling us to keep running @prev, replenish slice
 	 * if necessary and keep running @prev. Otherwise, pop the first one
 	 * from the local DSQ.
 	 */
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
 			  nr_donor_target, nr_target);
 	}
 
-	for_each_cpu(cpu, resched_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		raw_spin_rq_lock_irq(rq);
-		resched_curr(rq);
-		raw_spin_rq_unlock_irq(rq);
-	}
+	for_each_cpu(cpu, resched_mask)
+		resched_cpu(cpu);
 
 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
@@ -4025,7 +4020,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
  *
  * - ops.dispatch() is ignored.
  *
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
  *   the tail of the queue with core_sched_at touched.
  *
@@ -4783,8 +4778,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 	}
 
 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
-	if (!sch->pcpu)
+	if (!sch->pcpu) {
+		ret = -ENOMEM;
 		goto err_free_gdsqs;
+	}
 
 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
 	if (IS_ERR(sch->helper)) {
@@ -6067,7 +6064,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
 	/*
 	 * A successfully consumed task can be dequeued before it starts
 	 * running while the CPU is trying to migrate other dispatched
-	 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+	 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
	 * local DSQ.
 	 */
 	dspc->nr_tasks++;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da46c3164537..e71302282671 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -15,7 +15,7 @@
  * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  *
  * Scaled math optimizations by Thomas Gleixner
- * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  *
  * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fa83bbaf4f3e..897790889ba3 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -15,7 +15,7 @@
  * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  *
  * Scaled math optimizations by Thomas Gleixner
- * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  *
  * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
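Note on the bypass_lb_node() hunk: it replaces an open-coded lock/resched/unlock sequence with the existing resched_cpu() helper. For reference, resched_cpu() in kernel/sched/core.c is essentially the same sequence plus an online check, so the rewrite is behavior-preserving; the sketch below is from memory of that helper and is not part of this patch:

    void resched_cpu(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);
            unsigned long flags;

            raw_spin_rq_lock_irqsave(rq, flags);
            if (cpu_online(cpu) || cpu == smp_processor_id())
                    resched_curr(rq);
            raw_spin_rq_unlock_irqrestore(rq, flags);
    }
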
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a59bc75ab7c5..eaae1ce9f060 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -2,7 +2,7 @@
 /*
  * This file contains functions which manage clock event devices.
  *
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f8ea8c8fc895..bdb30cc5e873 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
  *
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0207868c8b4d..f63c65881364 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -3,7 +3,7 @@
  * This file contains functions which emulate a local clock-event
  * device via a broadcast event source.
  *
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 7e33d3f2e889..d305d8521896 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -3,7 +3,7 @@
  * This file contains the base functions to manage periodic tick
  * related events.
  *
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  */
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index ffee943d796d..7472597f3225 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -3,7 +3,7 @@
  * This file contains functions which manage high resolution tick
  * related events.
  *
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8ddf74e705d3..2f8a7923fa27 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
  *
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41c9f5d079be..630221b00838 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3137,6 +3137,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 				 list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
+
+			cond_resched();
 		}
 	}
 out_err_unlock:
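Note on the trace ring_buffer.c hunk: freeing a very large buffer in a tight loop can monopolize the CPU on non-preemptible kernels and trigger soft-lockup warnings, so the loop now yields each iteration. The general shape, with iterator names assumed for the sketch:

    list_for_each_entry_safe(bpage, tmp, &pages, list) {
            list_del_init(&bpage->list);
            free_buffer_page(bpage);

            /* May sleep: lets other tasks run while freeing a huge
             * page list on a non-preemptible kernel. */
            cond_resched();
    }
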
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6f2148df14d9..baec63134ab6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -138,7 +138,7 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
  * by commas.
  */
 /* Set to string format zero to disable by default */
-char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
+static char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
 
 /* When set, tracing will stop when a WARN*() is hit */
 static int __disable_trace_on_warning;
@@ -3012,6 +3012,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct ftrace_stack *fstack;
 	struct stack_entry *entry;
 	int stackidx;
+	int bit;
+
+	bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START);
+	if (bit < 0)
+		return;
 
 	/*
 	 * Add one, for this function and the call to save_stack_trace()
@@ -3080,6 +3085,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	/* Again, don't let gcc optimize things here */
 	barrier();
 	__this_cpu_dec(ftrace_stack_reserve);
+	trace_clear_recursion(bit);
 }
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 76067529db61..137b4d9bb116 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -826,16 +826,15 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 		 * When soft_disable is set and enable is set, we want to
 		 * register the tracepoint for the event, but leave the event
 		 * as is. That means, if the event was already enabled, we do
-		 * nothing (but set soft_mode). If the event is disabled, we
-		 * set SOFT_DISABLED before enabling the event tracepoint, so
-		 * it still seems to be disabled.
+		 * nothing. If the event is disabled, we set SOFT_DISABLED
+		 * before enabling the event tracepoint, so it still seems
+		 * to be disabled.
 		 */
 		if (!soft_disable)
 			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
 		else {
 			if (atomic_inc_return(&file->sm_ref) > 1)
 				break;
-			soft_mode = true;
 			/* Enable use of trace_buffered_event */
 			trace_buffered_event_enable();
 		}
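Note on the __ftrace_trace_stack() change: it guards against self-recursion — if recording a stack trace itself triggers another stack-trace event in the same context, the nested entry is dropped instead of recursing. The guard pattern, condensed from the hunks above:

    int bit;

    /* Claim the per-context recursion bit; a negative return means this
     * context is already inside __ftrace_trace_stack(), so bail out. */
    bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START);
    if (bit < 0)
            return;

    /* ... record the stack trace ... */

    trace_clear_recursion(bit);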
