diff options
| -rw-r--r-- | kernel/workqueue.c | 11 | 
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ae58c6ace23f..499a264183ef 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4979,6 +4979,9 @@ static void unbind_workers(int cpu)  		 * We've blocked all attach/detach operations. Make all workers  		 * unbound and set DISASSOCIATED.  Before this, all workers  		 * must be on the cpu.  After this, they may become diasporas. +		 * And the preemption disabled sections in their sched callbacks +		 * are guaranteed to see WORKER_UNBOUND since the code here +		 * is on the same cpu.  		 */  		for_each_pool_worker(worker, pool)  			worker->flags |= WORKER_UNBOUND; @@ -4995,14 +4998,6 @@ static void unbind_workers(int cpu)  		mutex_unlock(&wq_pool_attach_mutex);  		/* -		 * Call schedule() so that we cross rq->lock and thus can -		 * guarantee sched callbacks see the %WORKER_UNBOUND flag. -		 * This is necessary as scheduler callbacks may be invoked -		 * from other cpus. -		 */ -		schedule(); - -		/*  		 * Sched callbacks are disabled now.  Zap nr_running.  		 * After this, nr_running stays zero and need_more_worker()  		 * and keep_working() are always true as long as the  | 
