summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorzhidao su <suzhidao@xiaomi.com>2026-03-09 10:46:12 +0800
committerTejun Heo <tj@kernel.org>2026-03-09 06:08:26 -1000
commit2fcfe5951eb2e8440fc5e1dd6ea977336ff83a1d (patch)
tree014b793cbe1f708060704a84863435b12e8a5877 /kernel
parent57ccf5ccdc56954f2a91a7f66684fd31c566bde5 (diff)
sched_ext: Use WRITE_ONCE() for the write side of scx_enable helper pointer
scx_enable() uses double-checked locking to lazily initialize a static
kthread_worker pointer. The fast path reads helper locklessly:

    if (!READ_ONCE(helper)) {   // lockless read -- no helper_mutex

The write side initializes helper under helper_mutex, but previously used
a plain assignment:

    helper = kthread_run_worker(0, "scx_enable_helper");
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    plain write -- KCSAN data race with READ_ONCE() above

Since READ_ONCE() on the fast path and the plain write on the
initialization path access the same variable without a common lock, they
constitute a data race. KCSAN requires that all sides of a lock-free
access use READ_ONCE()/WRITE_ONCE() consistently.

Use a temporary variable to stage the result of kthread_run_worker(), and
only WRITE_ONCE() into helper after confirming the pointer is valid. This
avoids a window where a concurrent caller on the fast path could observe
an ERR pointer via READ_ONCE(helper) before the error check completes.

Fixes: b06ccbabe250 ("sched_ext: Fix starvation of scx_enable() under fair-class saturation")
Signed-off-by: zhidao su <suzhidao@xiaomi.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/ext.c9
1 files changed, 5 insertions, 4 deletions
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 174e3650d7fe..26a6ac2f8826 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5258,13 +5258,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (!READ_ONCE(helper)) {
mutex_lock(&helper_mutex);
if (!helper) {
- helper = kthread_run_worker(0, "scx_enable_helper");
- if (IS_ERR_OR_NULL(helper)) {
- helper = NULL;
+ struct kthread_worker *w =
+ kthread_run_worker(0, "scx_enable_helper");
+ if (IS_ERR_OR_NULL(w)) {
mutex_unlock(&helper_mutex);
return -ENOMEM;
}
- sched_set_fifo(helper->task);
+ sched_set_fifo(w->task);
+ WRITE_ONCE(helper, w);
}
mutex_unlock(&helper_mutex);
}