| author | Michael Ellerman <mpe@ellerman.id.au> | 2022-10-31 17:37:21 +1100 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-10-31 17:37:21 +1100 |
| commit | f98b4d13717a58b15c00880d2ccd24972316b430 (patch) | |
| tree | c3b50090ee3ef4b15b7874ae237d76710108674c /kernel/sched/sched.h | |
| parent | 65722736c3baf29e02e964a09e85c9ef71c48e8d (diff) | |
| parent | 247f34f7b80357943234f93f247a1ae6b6c3a740 (diff) | |
Merge tag 'v6.1-rc2' into fixes
Merge rc2 into our fixes branch, which was based on rc1 but wasn't
merged until rc3, so that for the remainder of the release our fixes
branch will be based on rc2 for the purposes of resolving conflicts with
other trees (if necessary).
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 32 |
1 file changed, 19 insertions, 13 deletions
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1644242ecd11..a4a20046e586 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -938,6 +938,12 @@ struct uclamp_rq {
 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
 #endif /* CONFIG_UCLAMP_TASK */
 
+struct rq;
+struct balance_callback {
+	struct balance_callback *next;
+	void (*func)(struct rq *rq);
+};
+
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -1036,7 +1042,7 @@ struct rq {
 	unsigned long		cpu_capacity;
 	unsigned long		cpu_capacity_orig;
 
-	struct callback_head	*balance_callback;
+	struct balance_callback	*balance_callback;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;
@@ -1182,6 +1188,14 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		this_cpu_ptr(&runqueues)
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+#define raw_rq()		raw_cpu_ptr(&runqueues)
+
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
@@ -1269,7 +1283,7 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 		return true;
 
 	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
-		if (sched_core_cookie_match(rq, p))
+		if (sched_core_cookie_match(cpu_rq(cpu), p))
 			return true;
 	}
 	return false;
@@ -1384,14 +1398,6 @@ static inline void update_idle_core(struct rq *rq)
 static inline void update_idle_core(struct rq *rq) { }
 #endif
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		this_cpu_ptr(&runqueues)
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		raw_cpu_ptr(&runqueues)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
@@ -1544,7 +1550,7 @@ struct rq_flags {
 #endif
 };
 
-extern struct callback_head balance_push_callback;
+extern struct balance_callback balance_push_callback;
 
 /*
  * Lockdep annotation that avoids accidental unlocks; it's like a
@@ -1724,7 +1730,7 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 static inline void
 queue_balance_callback(struct rq *rq,
-		       struct callback_head *head,
+		       struct balance_callback *head,
 		       void (*func)(struct rq *rq))
 {
 	lockdep_assert_rq_held(rq);
@@ -1737,7 +1743,7 @@ queue_balance_callback(struct rq *rq,
 	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
 		return;
 
-	head->func = (void (*)(struct callback_head *))func;
+	head->func = func;
 	head->next = rq->balance_callback;
 	rq->balance_callback = head;
 }
```
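The change brought in by this merge swaps the generic `struct callback_head` for a dedicated `struct balance_callback` in the runqueue's balance-callback list, so `queue_balance_callback()` can assign `head->func = func` directly instead of casting the function pointer to an unrelated prototype (the kind of mismatch that indirect-call type checking such as CFI rejects). Below is a minimal, self-contained sketch of that pattern, not kernel code: `demo_queue_callback`, `demo_run_callbacks`, the toy `struct rq` fields and `push_work` are simplified stand-ins for illustration only, and the re-queue guard omits the kernel's `balance_push_callback` sentinel check.

```c
/* Sketch of a typed, singly-linked callback list (stand-in names, not the
 * scheduler's actual helpers). The point: the node carries a function
 * pointer with its real signature, so queueing needs no cast. */
#include <stdio.h>
#include <stddef.h>

struct rq;                              /* forward declaration, as in the hunk */

struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct rq *rq);    /* typed: assigned without a cast */
};

struct rq {                             /* toy runqueue with just the list head */
	struct balance_callback *balance_callback;
	int cpu;
};

static void demo_queue_callback(struct rq *rq, struct balance_callback *head,
				void (*func)(struct rq *rq))
{
	if (head->next)                 /* crude re-queue guard; the kernel's
					 * check also covers a push sentinel */
		return;
	head->func = func;              /* direct assignment, types match */
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

static void demo_run_callbacks(struct rq *rq)
{
	struct balance_callback *head = rq->balance_callback;

	rq->balance_callback = NULL;    /* detach the whole chain, then walk it */
	while (head) {
		struct balance_callback *next = head->next;

		head->next = NULL;
		head->func(rq);         /* indirect call through the correct type */
		head = next;
	}
}

static void push_work(struct rq *rq)
{
	printf("push_work on cpu %d\n", rq->cpu);
}

int main(void)
{
	struct rq rq = { .balance_callback = NULL, .cpu = 0 };
	struct balance_callback cb = { 0 };

	demo_queue_callback(&rq, &cb, push_work);
	demo_run_callbacks(&rq);
	return 0;
}
```

With the old `struct callback_head`, the same queueing step needed `head->func = (void (*)(struct callback_head *))func;` and the eventual call went through a pointer whose type did not match the function's definition; a dedicated node type keeps the prototype intact end to end.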
