Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/hrtimer.c            4
-rw-r--r--  kernel/time/namespace.c          5
-rw-r--r--  kernel/time/posix-cpu-timers.c   4
-rw-r--r--  kernel/time/posix-timers.c      14
-rw-r--r--  kernel/time/tick-oneshot.c      20
-rw-r--r--  kernel/time/tick-sched.c        41
-rw-r--r--  kernel/time/timekeeping.c       25
-rw-r--r--  kernel/time/timer.c              9
-rw-r--r--  kernel/time/timer_migration.c  487
-rw-r--r--  kernel/time/timer_migration.h    2
10 files changed, 408 insertions, 203 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 88aa062b8a55..f8ea8c8fc895 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2145,7 +2145,7 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
int ret;
hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS);
- hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+ hrtimer_set_expires(&t.timer, restart->nanosleep.expires);
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
@@ -2172,7 +2172,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
restart = &current->restart_block;
restart->nanosleep.clockid = t.timer.base->clockid;
- restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
+ restart->nanosleep.expires = hrtimer_get_expires(&t.timer);
set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
destroy_hrtimer_on_stack(&t.timer);
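
Together with the posix-cpu-timers hunk below, the switch to the plain accessors indicates that restart_block's nanosleep.expires now holds a ktime_t rather than a raw nanosecond count. A minimal userspace sketch of the two accessor flavours, assuming (as in current kernels) that ktime_t is a signed 64-bit nanosecond count; every name below is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;	/* assumption: ktime_t is an s64 nanosecond count */

struct fake_timer { ktime_t expires; };

/* ktime_t accessor pair, mirroring hrtimer_set_expires()/hrtimer_get_expires() */
static void set_expires(struct fake_timer *t, ktime_t kt) { t->expires = kt; }
static ktime_t get_expires(const struct fake_timer *t)    { return t->expires; }

/* _tv64 flavour: only adds value when the caller holds a raw s64 count */
static void set_expires_tv64(struct fake_timer *t, int64_t ns) { t->expires = (ktime_t)ns; }

int main(void)
{
	struct fake_timer t;
	ktime_t restart_expires = 1000000;	/* stored as ktime_t, like nanosleep.expires now */

	set_expires(&t, restart_expires);	/* no _tv64 round trip needed */
	set_expires_tv64(&t, 2000000);		/* raw-ns variant shown for contrast */
	printf("expires = %lld ns\n", (long long)get_expires(&t));
	return 0;
}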
diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c
index 5b6997f4dc3d..e76be24b132c 100644
--- a/kernel/time/namespace.c
+++ b/kernel/time/namespace.c
@@ -478,11 +478,8 @@ const struct proc_ns_operations timens_for_children_operations = {
};
struct time_namespace init_time_ns = {
- .ns.ns_type = ns_common_type(&init_time_ns),
- .ns.__ns_ref = REFCOUNT_INIT(3),
+ .ns = NS_COMMON_INIT(init_time_ns),
.user_ns = &init_user_ns,
- .ns.inum = ns_init_inum(&init_time_ns),
- .ns.ops = &timens_operations,
.frozen_offsets = true,
};
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2e5b89d7d866..0de2bb7cbec0 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1557,7 +1557,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
* Report back to the user the time still remaining.
*/
restart = &current->restart_block;
- restart->nanosleep.expires = expires;
+ restart->nanosleep.expires = ns_to_ktime(expires);
if (restart->nanosleep.type != TT_NONE)
error = nanosleep_copyout(restart, &it.it_value);
}
@@ -1599,7 +1599,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
clockid_t which_clock = restart_block->nanosleep.clockid;
struct timespec64 t;
- t = ns_to_timespec64(restart_block->nanosleep.expires);
+ t = ktime_to_timespec64(restart_block->nanosleep.expires);
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
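
The conversions above are cheap: with ktime_t being plain nanoseconds, ns_to_ktime() is essentially a type change and ktime_to_timespec64() a division/remainder by NSEC_PER_SEC. A standalone sketch of that arithmetic (positive values only; the kernel also normalizes negative values), with all names illustrative:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

typedef int64_t ktime_t;
struct timespec64 { int64_t tv_sec; long tv_nsec; };

/* ns_to_ktime(): with ktime_t defined as s64 nanoseconds this is just a cast */
static ktime_t ns_to_ktime_sketch(uint64_t ns) { return (ktime_t)ns; }

/* ktime_to_timespec64(): split the nanosecond count into seconds + remainder */
static struct timespec64 ktime_to_timespec64_sketch(ktime_t kt)
{
	struct timespec64 ts = {
		.tv_sec  = kt / NSEC_PER_SEC,
		.tv_nsec = (long)(kt % NSEC_PER_SEC),
	};
	return ts;
}

int main(void)
{
	ktime_t expires = ns_to_ktime_sketch(1500000000ULL);	/* 1.5 s */
	struct timespec64 ts = ktime_to_timespec64_sketch(expires);

	printf("%lld s, %ld ns\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}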
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index aa3120104a51..80a8a09a21a0 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -475,12 +475,6 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
if (!kc->timer_create)
return -EOPNOTSUPP;
- new_timer = alloc_posix_timer();
- if (unlikely(!new_timer))
- return -EAGAIN;
-
- spin_lock_init(&new_timer->it_lock);
-
/* Special case for CRIU to restore timers with a given timer ID. */
if (unlikely(current->signal->timer_create_restore_ids)) {
if (copy_from_user(&req_id, created_timer_id, sizeof(req_id)))
@@ -490,6 +484,12 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
return -EINVAL;
}
+ new_timer = alloc_posix_timer();
+ if (unlikely(!new_timer))
+ return -EAGAIN;
+
+ spin_lock_init(&new_timer->it_lock);
+
/*
* Add the timer to the hash table. The timer is not yet valid
* after insertion, but has a unique ID allocated.
@@ -1242,7 +1242,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
* sys_clock_settime(). The kernel internal timekeeping is always using
* nanoseconds precision independent of the clocksource device which is
* used to read the time from. The resolution of that device only
- * affects the presicion of the time returned by sys_clock_gettime().
+ * affects the precision of the time returned by sys_clock_gettime().
*
* Returns:
* 0 Success. @tp contains the resolution
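
Moving alloc_posix_timer() after the CRIU id copy and validation means the early -EFAULT/-EINVAL returns no longer have a freshly allocated timer to unwind, which is presumably the point of the reorder. A standalone sketch of the validate-before-allocate ordering, with all names illustrative:

#include <stdlib.h>

struct obj { int id; };

static int validate_request(const char *req)
{
	return (req && req[0]) ? 0 : -1;	/* stands in for copy_from_user() + range check */
}

static int create_obj(const char *req, struct obj **out)
{
	struct obj *o;

	/* 1. Validate first: failures here have nothing to unwind. */
	if (validate_request(req))
		return -1;

	/* 2. Allocate only once the request is known to be sane. */
	o = calloc(1, sizeof(*o));
	if (!o)
		return -1;

	*out = o;
	return 0;
}

int main(void)
{
	struct obj *o;

	if (create_obj("restore-id", &o))
		return 1;
	free(o);
	return 0;
}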
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 5e2c2c26b3cc..ffee943d796d 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -19,6 +19,10 @@
/**
* tick_program_event - program the CPU local timer device for the next event
+ * @expires: the time at which the next timer event should occur
+ * @force: flag to force reprogramming even if the event time hasn't changed
+ *
+ * Return: 0 on success, negative error code on failure
*/
int tick_program_event(ktime_t expires, int force)
{
@@ -57,6 +61,13 @@ void tick_resume_oneshot(void)
/**
* tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
+ * @newdev: Pointer to the clock event device to configure
+ * @handler: Function to be called when the event device triggers an interrupt
+ * @next_event: Initial expiry time for the next event (in ktime)
+ *
+ * Configures the specified clock event device for oneshot mode,
+ * assigns the given handler as its event callback, and programs
+ * the device to trigger at the specified next event time.
*/
void tick_setup_oneshot(struct clock_event_device *newdev,
void (*handler)(struct clock_event_device *),
@@ -69,6 +80,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
/**
* tick_switch_to_oneshot - switch to oneshot mode
+ * @handler: function to call when an event occurs on the tick device
+ *
+ * Return: 0 on success, -EINVAL if the tick device is not present,
+ * not functional, or does not support oneshot mode.
*/
int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
{
@@ -101,7 +116,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
/**
* tick_oneshot_mode_active - check whether the system is in oneshot mode
*
- * returns 1 when either nohz or highres are enabled. otherwise 0.
+ * Return: 1 when either nohz or highres are enabled, otherwise 0.
*/
int tick_oneshot_mode_active(void)
{
@@ -120,6 +135,9 @@ int tick_oneshot_mode_active(void)
* tick_init_highres - switch to high resolution mode
*
* Called with interrupts disabled.
+ *
+ * Return: 0 on success, -EINVAL if the tick device cannot switch
+ * to oneshot/high-resolution mode.
*/
int tick_init_highres(void)
{
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c527b421c865..8ddf74e705d3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -201,6 +201,27 @@ static inline void tick_sched_flag_clear(struct tick_sched *ts,
ts->flags &= ~flag;
}
+/*
+ * Allow only one non-timekeeper CPU at a time to update jiffies from
+ * the timer tick.
+ *
+ * Returns true if the update was run.
+ */
+static bool tick_limited_update_jiffies64(struct tick_sched *ts, ktime_t now)
+{
+ static atomic_t in_progress;
+ int inp;
+
+ inp = atomic_read(&in_progress);
+ if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1))
+ return false;
+
+ if (ts->last_tick_jiffies == jiffies)
+ tick_do_update_jiffies64(now);
+ atomic_set(&in_progress, 0);
+ return true;
+}
+
#define MAX_STALLED_JIFFIES 5
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
@@ -239,10 +260,11 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
ts->stalled_jiffies = 0;
ts->last_tick_jiffies = READ_ONCE(jiffies);
} else {
- if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
- tick_do_update_jiffies64(now);
- ts->stalled_jiffies = 0;
- ts->last_tick_jiffies = READ_ONCE(jiffies);
+ if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) {
+ if (tick_limited_update_jiffies64(ts, now)) {
+ ts->stalled_jiffies = 0;
+ ts->last_tick_jiffies = READ_ONCE(jiffies);
+ }
}
}
@@ -1152,16 +1174,15 @@ static bool report_idle_softirq(void)
return false;
}
- if (ratelimit >= 10)
- return false;
-
/* On RT, softirq handling may be waiting on some lock */
if (local_bh_blocked())
return false;
- pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
- pending);
- ratelimit++;
+ if (ratelimit < 10) {
+ pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
+ pending);
+ ratelimit++;
+ }
return true;
}
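
The new tick_limited_update_jiffies64() gate relies on a 0 -> 1 compare-and-exchange so that at most one non-timekeeper CPU performs the catch-up jiffies update at a time, while the losers simply keep their stalled counter and try again later. A standalone C11 sketch of that pattern (the kernel uses atomic_try_cmpxchg()/atomic_set()); names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int in_progress;

static bool limited_update(void (*update)(void))
{
	int expected = atomic_load(&in_progress);

	/* Someone else is already updating, or we lost the 0 -> 1 race: back off. */
	if (expected || !atomic_compare_exchange_strong(&in_progress, &expected, 1))
		return false;

	update();			/* only one caller reaches this at a time */
	atomic_store(&in_progress, 0);	/* release the gate */
	return true;
}

static void do_update(void) { puts("jiffies-style update ran"); }

int main(void)
{
	printf("won the gate: %d\n", limited_update(do_update));
	return 0;
}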
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3a4d3b2e3f74..4790da895203 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3060,29 +3060,34 @@ static const struct attribute_group aux_clock_enable_attr_group = {
static int __init tk_aux_sysfs_init(void)
{
struct kobject *auxo, *tko = kobject_create_and_add("time", kernel_kobj);
+ int ret = -ENOMEM;
if (!tko)
- return -ENOMEM;
+ return ret;
auxo = kobject_create_and_add("aux_clocks", tko);
- if (!auxo) {
- kobject_put(tko);
- return -ENOMEM;
- }
+ if (!auxo)
+ goto err_clean;
for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
- if (!clk)
- return -ENOMEM;
-
- int ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
+ if (!clk) {
+ ret = -ENOMEM;
+ goto err_clean;
+ }
+ ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
if (ret)
- return ret;
+ goto err_clean;
}
return 0;
+
+err_clean:
+ kobject_put(auxo);
+ kobject_put(tko);
+ return ret;
}
late_initcall(tk_aux_sysfs_init);
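
The rework above funnels every failure through one err_clean label that drops the references taken so far (kobject_put() on auxo and tko, both of which tolerate NULL), instead of returning with them still held. A standalone sketch of the same single-exit cleanup shape, with illustrative names:

#include <stdlib.h>

/* Create two linked resources; on any failure undo whatever already exists. */
static int setup_pair(char **pa, char **pb)
{
	char *a, *b;
	int ret = -1;

	a = malloc(16);
	if (!a)
		return ret;		/* nothing to unwind yet */

	b = malloc(16);
	if (!b)
		goto err_clean;		/* mirrors the kobject_put() unwinding above */

	*pa = a;
	*pb = b;
	return 0;

err_clean:
	free(a);
	return ret;
}

int main(void)
{
	char *a, *b;

	if (setup_pair(&a, &b))
		return 1;
	free(b);
	free(a);
	return 0;
}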
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 553fa469d7cc..1f2364126894 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1458,10 +1458,11 @@ static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
base = lock_timer_base(timer, &flags);
- if (base->running_timer != timer)
+ if (base->running_timer != timer) {
ret = detach_if_pending(timer, base, true);
- if (shutdown)
- timer->function = NULL;
+ if (shutdown)
+ timer->function = NULL;
+ }
raw_spin_unlock_irqrestore(&base->lock, flags);
@@ -2472,7 +2473,7 @@ void update_process_times(int user_tick)
run_local_timers();
rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
- if (in_irq())
+ if (in_hardirq())
irq_work_tick();
#endif
sched_tick();
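
In the first timer.c hunk, clearing timer->function on shutdown is now confined to the branch where the timer is not the currently running one; while it is running, the function presumably returns the usual retry code and the sync callers loop. A minimal sketch of that shape, with illustrative names and the kernel's detach step reduced to a stub:

#include <stdbool.h>
#include <stddef.h>

struct simple_timer {
	void (*fn)(struct simple_timer *);
};

/*
 * A negative return means "the callback is running right now, retry later",
 * matching how a sync deletion loop keeps calling until it succeeds.
 * Both the detach and the shutdown clear happen only in the not-running branch.
 */
static int try_del(struct simple_timer *t, struct simple_timer *running, bool shutdown)
{
	int ret = -1;

	if (running != t) {
		ret = 0;		/* stands in for detach_if_pending() */
		if (shutdown)
			t->fn = NULL;	/* cleared only while the callback is not running */
	}
	return ret;
}

int main(void)
{
	struct simple_timer t = { .fn = NULL };

	return try_del(&t, NULL, true);	/* not running: returns 0 */
}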
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index c0c54dc5314c..18dda1aa782d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -420,14 +421,54 @@ static struct list_head *tmigr_level_list __read_mostly;
static unsigned int tmigr_hierarchy_levels __read_mostly;
static unsigned int tmigr_crossnode_level __read_mostly;
+static struct tmigr_group *tmigr_root;
+
static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
+/*
+ * CPUs available for timer migration.
+ * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
+ * Additionally tmigr_available_mutex serializes set/clear operations with each other.
+ */
+static cpumask_var_t tmigr_available_cpumask;
+static DEFINE_MUTEX(tmigr_available_mutex);
+
+/* Enabled during late initcall */
+static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated);
+
#define TMIGR_NONE 0xFF
#define BIT_CNT 8
static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
{
- return !(tmc->tmgroup && tmc->online);
+ return !(tmc->tmgroup && tmc->available);
+}
+
+/*
+ * Returns true if @cpu should be excluded from the hierarchy as isolated.
+ * Domain isolated CPUs don't participate in timer migration; nohz_full CPUs
+ * are still part of the hierarchy but become idle (from a tick and timer
+ * migration perspective) when they stop their tick. This lets the timekeeping
+ * CPU handle their global timers. Also marking isolated CPUs as idle would be
+ * too costly, hence they are completely excluded from the hierarchy.
+ * This check is necessary, for instance, to prevent offline isolated CPUs from
+ * being incorrectly marked as available once they come back online.
+ *
+ * This function returns false during early boot and the isolation logic is
+ * enabled only after isolated CPUs are marked as unavailable at late boot.
+ * The tick CPU can be isolated at boot, but it must not be marked as
+ * unavailable to avoid leaving the nohz_full CPUs without a global migrator.
+ * This should be ensured by the callers of this function: implicitly from hotplug
+ * callbacks and explicitly in tmigr_init_isolation() and
+ * tmigr_isolated_exclude_cpumask().
+ */
+static inline bool tmigr_is_isolated(int cpu)
+{
+ if (!static_branch_unlikely(&tmigr_exclude_isolated))
+ return false;
+ return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
+ cpuset_cpu_is_isolated(cpu)) &&
+ housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
}
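
Reduced to booleans, the predicate above excludes a CPU only when it is domain-isolated (isolcpus or an isolated cpuset partition) and is not nohz_full, since nohz_full CPUs must stay in the hierarchy and merely go idle. A standalone sketch of that logic with the housekeeping queries replaced by plain flags (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool is_excluded(bool logic_enabled, bool domain_isolated,
			bool cpuset_isolated, bool nohz_full)
{
	if (!logic_enabled)		/* disabled until the late initcall runs */
		return false;
	/* excluded: domain/cpuset isolated AND still a "kernel noise" housekeeper */
	return (domain_isolated || cpuset_isolated) && !nohz_full;
}

int main(void)
{
	printf("isolcpus, not nohz_full: %d\n", is_excluded(true, true, false, false));  /* 1 */
	printf("nohz_full:               %d\n", is_excluded(true, true, false, true));   /* 0 */
	printf("plain housekeeping CPU:  %d\n", is_excluded(true, false, false, false)); /* 0 */
	return 0;
}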
/*
@@ -502,11 +543,6 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
* @now: timer base monotonic
* @check: is set if there is the need to handle remote timers;
* required in tmigr_requires_handle_remote() only
- * @tmc_active: this flag indicates, whether the CPU which triggers
- * the hierarchy walk is !idle in the timer migration
- * hierarchy. When the CPU is idle and the whole hierarchy is
- * idle, only the first event of the top level has to be
- * considered.
*/
struct tmigr_walk {
u64 nextexp;
@@ -517,16 +553,13 @@ struct tmigr_walk {
unsigned long basej;
u64 now;
bool check;
- bool tmc_active;
};
typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
-static void __walk_groups(up_f up, struct tmigr_walk *data,
- struct tmigr_cpu *tmc)
+static void __walk_groups_from(up_f up, struct tmigr_walk *data,
+ struct tmigr_group *child, struct tmigr_group *group)
{
- struct tmigr_group *child = NULL, *group = tmc->tmgroup;
-
do {
WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
@@ -544,6 +577,12 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
} while (group);
}
+static void __walk_groups(up_f up, struct tmigr_walk *data,
+ struct tmigr_cpu *tmc)
+{
+ __walk_groups_from(up, data, NULL, tmc->tmgroup);
+}
+
static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
@@ -708,7 +747,7 @@ void tmigr_cpu_activate(void)
/*
* Returns true, if there is nothing to be propagated to the next level
*
- * @data->firstexp is set to expiry of first gobal event of the (top level of
+ * @data->firstexp is set to expiry of first global event of the (top level of
* the) hierarchy, but only when hierarchy is completely idle.
*
* The child and group states need to be read under the lock, to prevent a race
@@ -926,7 +965,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* updated the event takes care when hierarchy is completely
* idle. Otherwise the migrator does it as the event is enqueued.
*/
- if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
+ if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
now < tmc->cpuevt.nextevt.expires) {
raw_spin_unlock_irq(&tmc->lock);
return;
@@ -973,7 +1012,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* (See also section "Required event and timerqueue update after a
* remote expiry" in the documentation at the top)
*/
- if (!tmc->online || !tmc->idle) {
+ if (!tmc->available || !tmc->idle) {
timer_unlock_remote_bases(cpu);
goto unlock;
}
@@ -1113,15 +1152,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
*/
if (!tmigr_check_migrator(group, childmask))
return true;
-
- /*
- * When there is a parent group and the CPU which triggered the
- * hierarchy walk is not active, proceed the walk to reach the top level
- * group before reading the next_expiry value.
- */
- if (group->parent && !data->tmc_active)
- return false;
-
/*
* The lock is required on 32bit architectures to read the variable
* consistently with a concurrent writer. On 64bit the lock is not
@@ -1166,7 +1196,6 @@ bool tmigr_requires_handle_remote(void)
data.now = get_jiffies_update(&jif);
data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
- data.tmc_active = !tmc->idle;
data.check = false;
/*
@@ -1432,38 +1461,43 @@ static long tmigr_trigger_active(void *unused)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- WARN_ON_ONCE(!tmc->online || tmc->idle);
+ WARN_ON_ONCE(!tmc->available || tmc->idle);
return 0;
}
-static int tmigr_cpu_offline(unsigned int cpu)
+static int tmigr_clear_cpu_available(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
int migrator;
u64 firstexp;
- raw_spin_lock_irq(&tmc->lock);
- tmc->online = false;
- WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+ guard(mutex)(&tmigr_available_mutex);
- /*
- * CPU has to handle the local events on his own, when on the way to
- * offline; Therefore nextevt value is set to KTIME_MAX
- */
- firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
- trace_tmigr_cpu_offline(tmc);
- raw_spin_unlock_irq(&tmc->lock);
+ cpumask_clear_cpu(cpu, tmigr_available_cpumask);
+ scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (!tmc->available)
+ return 0;
+ tmc->available = false;
+ WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+ /*
+ * The CPU has to handle its local events on its own when on the way to
+ * offline; therefore the nextevt value is set to KTIME_MAX.
+ */
+ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
+ trace_tmigr_cpu_unavailable(tmc);
+ }
if (firstexp != KTIME_MAX) {
- migrator = cpumask_any_but(cpu_online_mask, cpu);
+ migrator = cpumask_any(tmigr_available_cpumask);
work_on_cpu(migrator, tmigr_trigger_active, NULL);
}
return 0;
}
-static int tmigr_cpu_online(unsigned int cpu)
+static int tmigr_set_cpu_available(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
@@ -1471,16 +1505,123 @@ static int tmigr_cpu_online(unsigned int cpu)
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
- raw_spin_lock_irq(&tmc->lock);
- trace_tmigr_cpu_online(tmc);
- tmc->idle = timer_base_is_idle();
- if (!tmc->idle)
- __tmigr_cpu_activate(tmc);
- tmc->online = true;
- raw_spin_unlock_irq(&tmc->lock);
+ if (tmigr_is_isolated(cpu))
+ return 0;
+
+ guard(mutex)(&tmigr_available_mutex);
+
+ cpumask_set_cpu(cpu, tmigr_available_cpumask);
+ scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (tmc->available)
+ return 0;
+ trace_tmigr_cpu_available(tmc);
+ tmc->idle = timer_base_is_idle();
+ if (!tmc->idle)
+ __tmigr_cpu_activate(tmc);
+ tmc->available = true;
+ }
return 0;
}
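
Both callbacks above now use the scope-based lock guards from linux/cleanup.h: guard(mutex) holds tmigr_available_mutex until the function returns, and scoped_guard(raw_spinlock_irq, ...) covers just the braced block, so the early "return 0" paths drop both locks automatically. A hedged kernel-style sketch of that shape with illustrative lock and state names:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_MUTEX(my_mutex);
static DEFINE_RAW_SPINLOCK(my_lock);
static bool my_state;

static int my_update(bool value)
{
	guard(mutex)(&my_mutex);			/* held until the function returns */

	scoped_guard(raw_spinlock_irq, &my_lock) {
		if (my_state == value)
			return 0;			/* both locks dropped automatically */
		my_state = value;
	}						/* raw spinlock dropped here */

	return 0;					/* mutex dropped here */
}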
+static void tmigr_cpu_isolate(struct work_struct *ignored)
+{
+ tmigr_clear_cpu_available(smp_processor_id());
+}
+
+static void tmigr_cpu_unisolate(struct work_struct *ignored)
+{
+ tmigr_set_cpu_available(smp_processor_id());
+}
+
+/**
+ * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
+ * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
+ *
+ * This function can be called from cpuset code to provide the new set of
+ * isolated CPUs that should be excluded from the hierarchy.
+ * Online CPUs not present in exclude_cpumask but already excluded are brought
+ * back to the hierarchy.
+ * Functions to isolate/unisolate need to be called locally and can sleep.
+ */
+int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ struct work_struct __percpu *works __free(free_percpu) =
+ alloc_percpu(struct work_struct);
+ cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ if (!works)
+ return -ENOMEM;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * First set previously isolated CPUs as available (unisolate).
+ * This cpumask contains only CPUs that switched to available now.
+ */
+ cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
+ cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
+
+ for_each_cpu(cpu, cpumask) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, tmigr_cpu_unisolate);
+ schedule_work_on(cpu, work);
+ }
+ for_each_cpu(cpu, cpumask)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ /*
+ * Then clear previously available CPUs (isolate).
+ * This cpumask contains only CPUs that switched to not available now.
+ * There cannot be overlap with the newly available ones.
+ */
+ cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
+ cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
+ /*
+ * Handle this here and not in the cpuset code because exclude_cpumask
+ * might include also the tick CPU if included in isolcpus.
+ */
+ for_each_cpu(cpu, cpumask) {
+ if (!tick_nohz_cpu_hotpluggable(cpu)) {
+ cpumask_clear_cpu(cpu, cpumask);
+ break;
+ }
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, tmigr_cpu_isolate);
+ schedule_work_on(cpu, work);
+ }
+ for_each_cpu(cpu, cpumask)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ return 0;
+}
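
tmigr_isolated_exclude_cpumask() fans the per-CPU state changes out as workqueue items so that the available/unavailable transitions run locally on each affected CPU, then flushes them all before returning. A hedged kernel-style sketch of that per-CPU work pattern (without the __free() based cleanup used above), with illustrative names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static void do_local_work(struct work_struct *work)
{
	/* runs on the CPU the work item was queued on */
}

static int run_on_each(const struct cpumask *mask)
{
	struct work_struct __percpu *works = alloc_percpu(struct work_struct);
	int cpu;

	if (!works)
		return -ENOMEM;

	for_each_cpu(cpu, mask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, do_local_work);
		schedule_work_on(cpu, work);		/* run do_local_work() on @cpu */
	}
	for_each_cpu(cpu, mask)
		flush_work(per_cpu_ptr(works, cpu));	/* wait for every CPU to finish */

	free_percpu(works);
	return 0;
}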
+
+static int __init tmigr_init_isolation(void)
+{
+ cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
+
+ static_branch_enable(&tmigr_exclude_isolated);
+
+ if (!housekeeping_enabled(HK_TYPE_DOMAIN))
+ return 0;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+
+ /* Protect against RCU torture hotplug testing */
+ guard(cpus_read_lock)();
+ return tmigr_isolated_exclude_cpumask(cpumask);
+}
+late_initcall(tmigr_init_isolation);
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
@@ -1498,21 +1639,6 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
s.seq = 0;
atomic_set(&group->migr_state, s.state);
- /*
- * If this is a new top-level, prepare its groupmask in advance.
- * This avoids accidents where yet another new top-level is
- * created in the future and made visible before the current groupmask.
- */
- if (list_empty(&tmigr_level_list[lvl])) {
- group->groupmask = BIT(0);
- /*
- * The previous top level has prepared its groupmask already,
- * simply account it as the first child.
- */
- if (lvl > 0)
- group->num_children = 1;
- }
-
timerqueue_init_head(&group->events);
timerqueue_init(&group->groupevt.nextevt);
group->groupevt.nextevt.expires = KTIME_MAX;
@@ -1520,8 +1646,7 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
group->groupevt.ignore = true;
}
-static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
- unsigned int lvl)
+static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
{
struct tmigr_group *tmp, *group = NULL;
@@ -1567,25 +1692,51 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
return group;
}
+static bool tmigr_init_root(struct tmigr_group *group, bool activate)
+{
+ if (!group->parent && group != tmigr_root) {
+ /*
+ * This is the new top-level, prepare its groupmask in advance
+ * to avoid accidents where yet another new top-level is
+ * created in the future and made visible before this groupmask.
+ */
+ group->groupmask = BIT(0);
+ WARN_ON_ONCE(activate);
+
+ return true;
+ }
+
+ return false;
+}
+
static void tmigr_connect_child_parent(struct tmigr_group *child,
struct tmigr_group *parent,
bool activate)
{
- struct tmigr_walk data;
-
- raw_spin_lock_irq(&child->lock);
- raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+ if (tmigr_init_root(parent, activate)) {
+ /*
+ * The previous top level had already prepared its groupmask, so
+ * simply account it in advance as the first child. If some groups
+ * have been created between the old and new root due to a node
+ * mismatch, the new root's child will be initialized accordingly.
+ */
+ parent->num_children = 1;
+ }
- if (activate) {
+ /* Connecting old root to new root ? */
+ if (!parent->parent && activate) {
/*
- * @child is the old top and @parent the new one. In this
- * case groupmask is pre-initialized and @child already
- * accounted, along with its new sibling corresponding to the
- * CPU going up.
+ * @child is the old top, or in case of node mismatch, some
+ * intermediate group between the old top and the new one in
+ * @parent. In this case the @child must be pre-accounted above
+ * as the first child. Its new inactive sibling corresponding
+ * to the CPU going up has been accounted as the second child.
*/
- WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2);
+ WARN_ON_ONCE(parent->num_children != 2);
+ child->groupmask = BIT(0);
} else {
- /* Adding @child for the CPU going up to @parent. */
+ /* Common case adding @child for the CPU going up to @parent. */
child->groupmask = BIT(parent->num_children++);
}
@@ -1596,87 +1747,61 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
*/
smp_store_release(&child->parent, parent);
- raw_spin_unlock(&parent->lock);
- raw_spin_unlock_irq(&child->lock);
-
trace_tmigr_connect_child_parent(child);
-
- if (!activate)
- return;
-
- /*
- * To prevent inconsistent states, active children need to be active in
- * the new parent as well. Inactive children are already marked inactive
- * in the parent group:
- *
- * * When new groups were created by tmigr_setup_groups() starting from
- * the lowest level (and not higher then one level below the current
- * top level), then they are not active. They will be set active when
- * the new online CPU comes active.
- *
- * * But if a new group above the current top level is required, it is
- * mandatory to propagate the active state of the already existing
- * child to the new parent. So tmigr_connect_child_parent() is
- * executed with the formerly top level group (child) and the newly
- * created group (parent).
- *
- * * It is ensured that the child is active, as this setup path is
- * executed in hotplug prepare callback. This is exectued by an
- * already connected and !idle CPU. Even if all other CPUs go idle,
- * the CPU executing the setup will be responsible up to current top
- * level group. And the next time it goes inactive, it will release
- * the new childmask and parent to subsequent walkers through this
- * @child. Therefore propagate active state unconditionally.
- */
- data.childmask = child->groupmask;
-
- /*
- * There is only one new level per time (which is protected by
- * tmigr_mutex). When connecting the child and the parent and set the
- * child active when the parent is inactive, the parent needs to be the
- * uppermost level. Otherwise there went something wrong!
- */
- WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
}
-static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
+ struct tmigr_group *start, bool activate)
{
struct tmigr_group *group, *child, **stack;
- int top = 0, err = 0, i = 0;
- struct list_head *lvllist;
+ int i, top = 0, err = 0, start_lvl = 0;
+ bool root_mismatch = false;
stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
if (!stack)
return -ENOMEM;
- do {
- group = tmigr_get_group(cpu, node, i);
+ if (start) {
+ stack[start->level] = start;
+ start_lvl = start->level + 1;
+ }
+
+ if (tmigr_root)
+ root_mismatch = tmigr_root->numa_node != node;
+
+ for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
+ group = tmigr_get_group(node, i);
if (IS_ERR(group)) {
err = PTR_ERR(group);
+ i--;
break;
}
top = i;
- stack[i++] = group;
+ stack[i] = group;
/*
* When booting only less CPUs of a system than CPUs are
- * available, not all calculated hierarchy levels are required.
+ * available, not all calculated hierarchy levels are required,
+ * unless a node mismatch is detected.
*
* The loop is aborted as soon as the highest level, which might
* be different from tmigr_hierarchy_levels, contains only a
- * single group.
+ * single group, unless the nodes mismatch below tmigr_crossnode_level.
*/
- if (group->parent || list_is_singular(&tmigr_level_list[i - 1]))
+ if (group->parent)
break;
+ if ((!root_mismatch || i >= tmigr_crossnode_level) &&
+ list_is_singular(&tmigr_level_list[i]))
+ break;
+ }
- } while (i < tmigr_hierarchy_levels);
-
- /* Assert single root */
- WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
+ /* Assert single root without parent */
+ if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels))
+ return -EINVAL;
- while (i > 0) {
- group = stack[--i];
+ for (; i >= start_lvl; i--) {
+ group = stack[i];
if (err < 0) {
list_del(&group->list);
@@ -1692,12 +1817,10 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
if (i == 0) {
struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
- raw_spin_lock_irq(&group->lock);
-
tmc->tmgroup = group;
tmc->groupmask = BIT(group->num_children++);
- raw_spin_unlock_irq(&group->lock);
+ tmigr_init_root(group, activate);
trace_tmigr_connect_cpu_parent(tmc);
@@ -1705,42 +1828,58 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
continue;
} else {
child = stack[i - 1];
- /* Will be activated at online time */
- tmigr_connect_child_parent(child, group, false);
+ tmigr_connect_child_parent(child, group, activate);
}
+ }
- /* check if uppermost level was newly created */
- if (top != i)
- continue;
-
- WARN_ON_ONCE(top == 0);
+ if (err < 0)
+ goto out;
- lvllist = &tmigr_level_list[top];
+ if (activate) {
+ struct tmigr_walk data;
+ union tmigr_state state;
/*
- * Newly created root level should have accounted the upcoming
- * CPU's child group and pre-accounted the old root.
+ * To prevent inconsistent states, active children need to be active in
+ * the new parent as well. Inactive children are already marked inactive
+ * in the parent group:
+ *
+ * * When new groups were created by tmigr_setup_groups() starting from
+ * the lowest level, then they are not active. They will be set active
+ * when the new online CPU comes active.
+ *
+ * * But if new groups above the current top level are required, it is
+ * mandatory to propagate the active state of the already existing
+ * child to the new parents. So tmigr_active_up() activates the
+ * new parents while walking up from the old root to the new.
+ *
+ * * It is ensured that @start is active, as this setup path is
+ * executed in hotplug prepare callback. This is executed by an
+ * already connected and !idle CPU. Even if all other CPUs go idle,
+ * the CPU executing the setup will be responsible up to current top
+ * level group. And the next time it goes inactive, it will release
+ * the new childmask and parent to subsequent walkers through this
+ * @child. Therefore propagate active state unconditionally.
*/
- if (group->num_children == 2 && list_is_singular(lvllist)) {
- /*
- * The target CPU must never do the prepare work, except
- * on early boot when the boot CPU is the target. Otherwise
- * it may spuriously activate the old top level group inside
- * the new one (nevertheless whether old top level group is
- * active or not) and/or release an uninitialized childmask.
- */
- WARN_ON_ONCE(cpu == raw_smp_processor_id());
-
- lvllist = &tmigr_level_list[top - 1];
- list_for_each_entry(child, lvllist, list) {
- if (child->parent)
- continue;
+ state.state = atomic_read(&start->migr_state);
+ WARN_ON_ONCE(!state.active);
+ WARN_ON_ONCE(!start->parent);
+ data.childmask = start->groupmask;
+ __walk_groups_from(tmigr_active_up, &data, start, start->parent);
+ }
- tmigr_connect_child_parent(child, group, true);
- }
+ /* Root update */
+ if (list_is_singular(&tmigr_level_list[top])) {
+ group = list_first_entry(&tmigr_level_list[top],
+ typeof(*group), list);
+ WARN_ON_ONCE(group->parent);
+ if (tmigr_root) {
+ /* Old root should be the same or below */
+ WARN_ON_ONCE(tmigr_root->level > top);
}
+ tmigr_root = group;
}
-
+out:
kfree(stack);
return err;
@@ -1748,12 +1887,31 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
static int tmigr_add_cpu(unsigned int cpu)
{
+ struct tmigr_group *old_root = tmigr_root;
int node = cpu_to_node(cpu);
int ret;
- mutex_lock(&tmigr_mutex);
- ret = tmigr_setup_groups(cpu, node);
- mutex_unlock(&tmigr_mutex);
+ guard(mutex)(&tmigr_mutex);
+
+ ret = tmigr_setup_groups(cpu, node, NULL, false);
+
+ /* Root has changed? Connect the old one to the new */
+ if (ret >= 0 && old_root && old_root != tmigr_root) {
+ /*
+ * The target CPU must never do the prepare work, except
+ * on early boot when the boot CPU is the target. Otherwise
+ * it may spuriously activate the old top level group inside
+ * the new one (nevertheless whether old top level group is
+ * active or not) and/or release an uninitialized childmask.
+ */
+ WARN_ON_ONCE(cpu == raw_smp_processor_id());
+ /*
+ * The (likely) current CPU is expected to be online in the hierarchy,
+ * otherwise the old root may not be active as expected.
+ */
+ WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
+ ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
+ }
return ret;
}
@@ -1798,6 +1956,11 @@ static int __init tmigr_init(void)
if (ncpus == 1)
return 0;
+ if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* Calculate the required hierarchy levels. Unfortunately there is no
* reliable information available, unless all possible CPUs have been
@@ -1847,7 +2010,7 @@ static int __init tmigr_init(void)
goto err;
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
- tmigr_cpu_online, tmigr_cpu_offline);
+ tmigr_set_cpu_available, tmigr_clear_cpu_available);
if (ret)
goto err;
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index ae19f70f8170..70879cde6fdd 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -97,7 +97,7 @@ struct tmigr_group {
*/
struct tmigr_cpu {
raw_spinlock_t lock;
- bool online;
+ bool available;
bool idle;
bool remote;
struct tmigr_group *tmgroup;