| author | Tejun Heo <tj@kernel.org> | 2025-11-03 11:57:11 -1000 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2025-11-03 11:57:26 -1000 |
| commit | 587eb08a5fef911856c16c8bb444b2ab7f4f207f (patch) | |
| tree | a5e73c037b5e6f5a6b56f613004cb82009eb5d48 /kernel/sched | |
| parent | 34423456443c5cf5ad5180155f7d40f96b836194 (diff) | |
| parent | d245698d727ab8f5420b3e28d1243f96a5234851 (diff) | |
sched_ext: Merge branch 'for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup into for-6.19
Pull cgroup/for-6.19 to receive:
16dad7801aad ("cgroup: Rename cgroup lifecycle hooks to cgroup_task_*()")
260fbcb92bbe ("cgroup: Move dying_tasks cleanup from cgroup_task_release() to cgroup_task_free()")
d245698d727a ("cgroup: Defer task cgroup unlink until after the task is done switching out")
These are needed for the sched_ext cgroup exit ordering fix.
Signed-off-by: Tejun Heo <tj@kernel.org>
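
For context, the core.c hunk in the diff below calls cgroup_task_dead(prev) from finish_task_switch() only after the scheduler class's task_dead() hook has run; that is the deferred-unlink ordering the pulled cgroup commits make possible. Here is a minimal userspace sketch of that sequencing; struct task and the helper bodies are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures and hooks involved. */
struct task {
	const char *comm;
	int cgroup_linked;
};

/*
 * Stand-in for prev->sched_class->task_dead(): sched_ext's cgroup exit
 * logic runs here and may still need the task's cgroup, so the cgroup
 * must not have been unlinked yet.
 */
static void sched_class_task_dead(struct task *p)
{
	printf("%s: task_dead(), cgroup still linked: %d\n",
	       p->comm, p->cgroup_linked);
}

/* Stand-in for cgroup_task_dead(): only now is the task unlinked. */
static void cgroup_task_dead(struct task *p)
{
	p->cgroup_linked = 0;
	printf("%s: cgroup unlinked\n", p->comm);
}

/* Mirrors the ordering in the finish_task_switch() hunk below. */
static void finish_task_switch(struct task *prev)
{
	sched_class_task_dead(prev);
	cgroup_task_dead(prev);
}

int main(void)
{
	struct task t = { .comm = "dying-task", .cgroup_linked = 1 };
	finish_task_switch(&t);
	return 0;
}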
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/autogroup.c | 4 |
| -rw-r--r-- | kernel/sched/core.c | 4 |
| -rw-r--r-- | kernel/sched/deadline.c | 3 |
| -rw-r--r-- | kernel/sched/fair.c | 26 |

4 files changed, 22 insertions, 15 deletions
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index cdea931aae30..954137775f38 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -178,8 +178,8 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	 * this process can already run with task_group() == prev->tg or we can
 	 * race with cgroup code which can read autogroup = prev under rq->lock.
 	 * In the latter case for_each_thread() can not miss a migrating thread,
-	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
-	 * can't be removed from thread list, we hold ->siglock.
+	 * cpu_cgroup_attach() must not be possible after cgroup_task_exit()
+	 * and it can't be removed from thread list, we hold ->siglock.
 	 *
 	 * If an exiting thread was already removed from thread list we rely on
 	 * sched_autogroup_exit_task().
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 096e8d03d85e..0324457622d7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5151,6 +5151,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		if (prev->sched_class->task_dead)
 			prev->sched_class->task_dead(prev);
 
+		cgroup_task_dead(prev);
+
 		/* Task is done with its stack. */
 		put_task_stack(prev);
 
@@ -8474,10 +8476,12 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);
 
 	rq_lock_irqsave(rq, &rf);
+	update_rq_clock(rq);
 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
 		WARN(true, "Dying CPU not properly vacated!");
 		dump_rq_tasks(rq, KERN_WARNING);
 	}
+	dl_server_stop(&rq->fair_server);
 	rq_unlock_irqrestore(rq, &rf);
 
 	calc_load_migrate(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 48357d4609bf..6b8a9286e2fc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1582,6 +1582,9 @@ void dl_server_start(struct sched_dl_entity *dl_se)
 	if (!dl_server(dl_se) || dl_se->dl_server_active)
 		return;
 
+	if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
+		return;
+
 	dl_se->dl_server_active = 1;
 	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
 	if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2554055c1ba1..b58e696d7ccc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8911,21 +8911,21 @@ simple:
 	return p;
 
 idle:
-	if (!rf)
-		return NULL;
-
-	new_tasks = sched_balance_newidle(rq, rf);
+	if (rf) {
+		new_tasks = sched_balance_newidle(rq, rf);
 
-	/*
-	 * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is
-	 * possible for any higher priority task to appear. In that case we
-	 * must re-start the pick_next_entity() loop.
-	 */
-	if (new_tasks < 0)
-		return RETRY_TASK;
+		/*
+		 * Because sched_balance_newidle() releases (and re-acquires)
+		 * rq->lock, it is possible for any higher priority task to
+		 * appear. In that case we must re-start the pick_next_entity()
+		 * loop.
+		 */
+		if (new_tasks < 0)
+			return RETRY_TASK;
 
-	if (new_tasks > 0)
-		goto again;
+		if (new_tasks > 0)
+			goto again;
+	}
 
 	/*
 	 * rq is about to be idle, check if we need to update the
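
The core.c and deadline.c hunks above pair up: sched_cpu_dying() stops the fair server while the dying CPU's queue lock is held, and dl_server_start() refuses, with a one-shot warning, to start a server on an offline CPU. Below is a minimal userspace sketch of that invariant under assumed, simplified types; the real code operates on struct rq under rq_lock_irqsave().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-CPU runqueue state involved here. */
struct cpu_rq {
	bool online;
	bool fair_server_active;
};

static void dl_server_stop(struct cpu_rq *rq)
{
	rq->fair_server_active = false;
}

static bool dl_server_start(struct cpu_rq *rq)
{
	/* Mirrors the WARN_ON_ONCE(!cpu_online(cpu_of(rq))) guard above. */
	if (!rq->online) {
		fprintf(stderr, "refusing to start server on offline CPU\n");
		return false;
	}
	rq->fair_server_active = true;
	return true;
}

static void sched_cpu_dying(struct cpu_rq *rq)
{
	/* In the real code this section runs under rq_lock_irqsave(). */
	dl_server_stop(rq);
	rq->online = false;
}

int main(void)
{
	struct cpu_rq rq = { .online = true, .fair_server_active = true };

	sched_cpu_dying(&rq);
	if (!dl_server_start(&rq))
		printf("server stays stopped after hotplug\n");
	return 0;
}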
