author		Linus Torvalds <torvalds@linux-foundation.org>	2026-01-24 09:29:41 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-01-24 09:29:41 -0800
commit		af5a3fae860df6d065d796810a3e3a03fbb6f895 (patch)
tree		18452b33f14627955fd326347865a1218e27db26 /kernel
parent		ceaeaf66a21f87febed73ec2051f9384390efe2f (diff)
parent		15257cc2f905dbf5813c0bfdd3c15885f28093c4 (diff)
Merge tag 'sched-urgent-2026-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:

 - Fix a PELT clock synchronization bug when entering idle

 - Disable the NEXT_BUDDY feature, as during extensive testing Mel found
   that the negatives outweigh the positives

 - Make wakeup preemption less aggressive again, by reverting a change
   that resulted in an unreasonable increase in preemption frequency

* tag 'sched-urgent-2026-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Revert force wakeup preemption
  sched/fair: Disable scheduler feature NEXT_BUDDY
  sched/fair: Fix pelt clock sync when entering idle
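For context on the reverted heuristic: the removed code (first fair.c hunk below) forced a resched on wakeup whenever the waker was no longer eligible. A minimal, self-contained toy model of that idea, assuming EEVDF-style eligibility (an entity stays eligible while its vruntime is at or behind the queue's average vruntime); all toy_* names and the simplified signatures are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct toy_entity { long long vruntime; };
struct toy_cfs_rq { long long avg_vruntime; };

/* Simplified stand-in for entity_eligible(): eligible while the entity's
 * vruntime has not run ahead of the queue average. */
static bool toy_entity_eligible(const struct toy_cfs_rq *cfs_rq,
				const struct toy_entity *se)
{
	return se->vruntime <= cfs_rq->avg_vruntime;
}

int main(void)
{
	struct toy_cfs_rq rq = { .avg_vruntime = 100 };
	struct toy_entity waker = { .vruntime = 160 };

	/* The reverted code rescheduled here on wakeup: the waker has run
	 * past its fair share, so preempt it in favour of the wakee.
	 * In testing this fired too often, hence the revert. */
	if (!toy_entity_eligible(&rq, &waker))
		printf("waker ineligible: forced resched under the reverted heuristic\n");
	return 0;
}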
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	16
-rw-r--r--	kernel/sched/features.h	2
-rw-r--r--	kernel/sched/idle.c	6
3 files changed, 7 insertions, 17 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e71302282671..3eaeceda71b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8828,16 +8828,6 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if ((wake_flags & WF_FORK) || pse->sched_delayed)
 		return;
 
-	/*
-	 * If @p potentially is completing work required by current then
-	 * consider preemption.
-	 *
-	 * Reschedule if waker is no longer eligible. */
-	if (in_task() && !entity_eligible(cfs_rq, se)) {
-		preempt_action = PREEMPT_WAKEUP_RESCHED;
-		goto preempt;
-	}
-
 	/* Prefer picking wakee soon if appropriate. */
 	if (sched_feat(NEXT_BUDDY) &&
 	    set_preempt_buddy(cfs_rq, wake_flags, pse, se)) {
@@ -8995,12 +8985,6 @@ idle:
 		goto again;
 	}
 
-	/*
-	 * rq is about to be idle, check if we need to update the
-	 * lost_idle_time of clock_pelt
-	 */
-	update_idle_rq_clock_pelt(rq);
-
 	return NULL;
 }
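On the third fix: update_idle_rq_clock_pelt() used to run only on this pick_next_task_fair() idle path; the merge moves it into set_next_task_idle() (idle.c hunk below), tying it to every switch to the idle task. A toy model of the bookkeeping, assuming the lost_idle_time semantics sketched in the removed comment (clock_pelt may lag clock_task, and the lag must be captured at idle entry); the toy_* names and the unconditional arithmetic are illustrative simplifications, not the kernel's actual PELT code:

#include <stdio.h>

struct toy_rq {
	unsigned long long clock_task;     /* task clock, ns */
	unsigned long long clock_pelt;     /* scaled PELT clock, may lag clock_task */
	unsigned long long lost_idle_time; /* lag folded in at idle entry */
};

/* Stand-in for update_idle_rq_clock_pelt(): capture the accumulated lag
 * and sync the PELT clock to the task clock before the rq goes idle. */
static void toy_update_idle_rq_clock_pelt(struct toy_rq *rq)
{
	rq->lost_idle_time += rq->clock_task - rq->clock_pelt;
	rq->clock_pelt = rq->clock_task;
}

/* Stand-in for set_next_task_idle(): after this merge the sync happens
 * here, on every transition to the idle task, instead of only on the
 * pick_next_task_fair() slow path removed above. */
static void toy_set_next_task_idle(struct toy_rq *rq)
{
	toy_update_idle_rq_clock_pelt(rq);
}

int main(void)
{
	struct toy_rq rq = { .clock_task = 1000, .clock_pelt = 600 };

	toy_set_next_task_idle(&rq);
	printf("lost_idle_time=%llu clock_pelt=%llu\n",
	       rq.lost_idle_time, rq.clock_pelt);
	return 0;
}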
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 980d92bab8ab..136a6584be79 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -29,7 +29,7 @@ SCHED_FEAT(PREEMPT_SHORT, true)
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, true)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Allow completely ignoring cfs_rq->next; which can be set from various
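How flipping this default takes effect, roughly: each SCHED_FEAT(name, enabled) entry in features.h becomes one bit in a feature mask that sched_feat() tests, as in the fair.c hunk above. A minimal stand-alone sketch of that two-pass macro pattern; the real kernel builds the list by re-including features.h and also uses jump labels under CONFIG_SCHED_DEBUG, so this is an illustration rather than the actual implementation:

#include <stdbool.h>
#include <stdio.h>

/* Feature list, analogous to kernel/sched/features.h after this merge. */
#define SCHED_FEATURES(F)		\
	F(PREEMPT_SHORT, true)		\
	F(NEXT_BUDDY, false)

/* First pass: assign each feature a bit index. */
#define F_ENUM(name, enabled) __SCHED_FEAT_##name,
enum { SCHED_FEATURES(F_ENUM) __SCHED_FEAT_NR };

/* Second pass: fold the defaults into one mask, like sysctl_sched_features. */
#define F_BIT(name, enabled) ((enabled) ? 1UL << __SCHED_FEAT_##name : 0) |
static const unsigned long sysctl_sched_features = SCHED_FEATURES(F_BIT) 0UL;

#define sched_feat(x) \
	(!!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)))

int main(void)
{
	printf("PREEMPT_SHORT: %d\n", sched_feat(PREEMPT_SHORT));
	printf("NEXT_BUDDY:    %d\n", sched_feat(NEXT_BUDDY));
	return 0;
}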
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c174afe1dd17..abf8f15d60c9 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -468,6 +468,12 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 	scx_update_idle(rq, true, true);
 	schedstat_inc(rq->sched_goidle);
 	next->se.exec_start = rq_clock_task(rq);
+
+	/*
+	 * rq is about to be idle, check if we need to update the
+	 * lost_idle_time of clock_pelt
+	 */
+	update_idle_rq_clock_pelt(rq);
 }
 
 struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
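To verify the new default on a running kernel, the feature mask is exposed through debugfs, where disabled features print with a NO_ prefix; a kernel carrying this merge should therefore list NO_NEXT_BUDDY. A small reader, assuming debugfs is mounted and the file lives at /sys/kernel/debug/sched/features (older kernels expose /sys/kernel/debug/sched_features instead):

#include <stdio.h>

int main(void)
{
	char buf[4096];
	FILE *f = fopen("/sys/kernel/debug/sched/features", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Dump the one-line feature list, e.g. "... NO_NEXT_BUDDY ...". */
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}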