path: root/kernel/workqueue.c
author		Andrew Morton <akpm@osdl.org>	2004-02-18 04:47:11 -0800
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-02-18 04:47:11 -0800
commit		933ba10234f68f44af3a72afb3c0810b082c5a0e (patch)
tree		7888eebd491a842cc34d6b9704d0c4565301ddfa /kernel/workqueue.c
parent		ad77865cc25cd19c1516f2f22d845402d6d88f8d (diff)
[PATCH] kthread primitive
From: Rusty Russell <rusty@rustcorp.com.au>

These two patches provide the framework for stopping kernel threads to allow hotplug CPU. This one just adds kthread.c and kthread.h, next one uses it. Most importantly, adds a Monty Python quote to the kernel.

Details:

The hotplug CPU code introduces two major problems:

1) Threads which previously never stopped (migration thread, ksoftirqd, keventd) have to be stopped cleanly as CPUs go offline.

2) Threads which previously never had to be created now have to be created when a CPU goes online.

Unfortunately, stopping a thread is fairly baroque, involving memory barriers, a completion and spinning until the task is actually dead (for example, complete_and_exit() must be used if inside a module).

There are also three problems in starting a thread:

1) Doing it from a random process context risks environment contamination: better to do it from keventd to guarantee a clean environment, a-la call_usermodehelper.

2) Getting the task struct without races is hard: see kernel/sched.c migration_call(), kernel/workqueue.c create_workqueue_thread().

3) There are races in starting a thread for a CPU which is not yet online: the migration thread does a complex dance at the moment for a similar reason (there may be no migration thread to migrate us).

Place all this logic in some primitives to make life easier: kthread_create() and kthread_stop(). These primitives require no extra data structures in the caller: they operate on normal "struct task_struct"s.

Other changes:

- Expose keventd_up(), as keventd and migration threads will use kthread to launch, and kthread normally uses workqueues and must recognize this case.

- Kthreads created at boot before "keventd" are spawned directly. However, this means that they don't have all signals blocked, and hence can be killed. The simplest solution is to always explicitly block all signals in the kthread.

- Change over the migration threads, the workqueue threads and the ksoftirqd threads to use kthread.

- module.c currently spawns threads directly to stop the machine, so a module can be atomically tested for removal.

- Unfortunately, this means that the current task is manipulated (which races with set_cpus_allowed, for example), and it can't set its priority artificially high. Using a kernel thread can solve this cleanly, and with kthread_run, it's simple.

- kthreads use keventd, so they inherit its cpus_allowed mask. Unset it. All current users set it explicitly anyway, but it's nice to fix.

- call_usermodehelper uses keventd, so the process created inherits its cpus_allowed mask. Unset it.

- Prevent errors in boot when cpus_possible() contains a cpu which is not online (ie. a cpu didn't come up). This doesn't happen on x86, since a boot failure makes that CPU no longer possible (hacky, but it works).

- When the cpu fails to come up, some callbacks do kthread_stop(), which doesn't work without keventd (which hasn't started yet). Call it directly, and take care that it restores signal state (note: do_sigaction does a flush on blocked signals, so we don't need to repeat it).
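To make the lifecycle concrete, here is a minimal sketch of the create/bind/wake/stop pattern these primitives enable, mirroring the workqueue conversion below. It is illustrative only, not code from this patch: my_thread_fn, my_task and the "mythread" name are made up.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *my_task;     /* hypothetical example thread */

static int my_thread_fn(void *data)
{
        /* Sleep until woken; exit once kthread_stop() is called. */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;                       /* becomes kthread_stop()'s return value */
}

static int start_my_thread(int cpu)
{
        my_task = kthread_create(my_thread_fn, NULL, "mythread/%d", cpu);
        if (IS_ERR(my_task))
                return PTR_ERR(my_task);
        kthread_bind(my_task, cpu);     /* pin it before it first runs */
        wake_up_process(my_task);       /* kthread_create() leaves it asleep */
        return 0;
}

static void stop_my_thread(void)
{
        kthread_stop(my_task);          /* blocks until my_thread_fn() returns */
}

For threads that need no CPU binding, the kthread_run() helper mentioned above folds the kthread_create() and wake_up_process() steps into a single call.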
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	67
1 file changed, 27 insertions(+), 40 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fed20b9813e5..47bd9c000e74 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -22,6 +22,7 @@
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 
 /*
  * The per-CPU workqueue.
@@ -45,7 +46,6 @@ struct cpu_workqueue_struct {
 
         struct workqueue_struct *wq;
         task_t *thread;
-        struct completion exit;
 
 } ____cacheline_aligned;
@@ -153,28 +153,23 @@ static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
         spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
-typedef struct startup_s {
-        struct cpu_workqueue_struct *cwq;
-        struct completion done;
-        const char *name;
-} startup_t;
-
-static int worker_thread(void *__startup)
+static int worker_thread(void *__cwq)
 {
-        startup_t *startup = __startup;
-        struct cpu_workqueue_struct *cwq = startup->cwq;
+        struct cpu_workqueue_struct *cwq = __cwq;
         int cpu = cwq - cwq->wq->cpu_wq;
         DECLARE_WAITQUEUE(wait, current);
         struct k_sigaction sa;
+        sigset_t blocked;
 
-        daemonize("%s/%d", startup->name, cpu);
         current->flags |= PF_IOTHREAD;
-        cwq->thread = current;
         set_user_nice(current, -10);
-        set_cpus_allowed(current, cpumask_of_cpu(cpu));
+        BUG_ON(smp_processor_id() != cpu);
 
-        complete(&startup->done);
+        /* Block and flush all signals */
+        sigfillset(&blocked);
+        sigprocmask(SIG_BLOCK, &blocked, NULL);
+        flush_signals(current);
 
         /* SIG_IGN makes children autoreap: see do_notify_parent(). */
         sa.sa.sa_handler = SIG_IGN;
@@ -182,12 +177,10 @@ static int worker_thread(void *__startup)
         siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
         do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
 
-        for (;;) {
+        while (!kthread_should_stop()) {
                 set_task_state(current, TASK_INTERRUPTIBLE);
                 add_wait_queue(&cwq->more_work, &wait);
-                if (!cwq->thread)
-                        break;
                 if (list_empty(&cwq->worklist))
                         schedule();
                 else
                         set_task_state(current, TASK_RUNNING);
@@ -197,9 +190,6 @@ static int worker_thread(void *__startup)
                 if (!list_empty(&cwq->worklist))
                         run_workqueue(cwq);
         }
-        remove_wait_queue(&cwq->more_work, &wait);
-        complete(&cwq->exit);
-
         return 0;
 }
@@ -251,9 +241,8 @@ static int create_workqueue_thread(struct workqueue_struct *wq,
                                    const char *name,
                                    int cpu)
 {
-        startup_t startup;
         struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
-        int ret;
+        struct task_struct *p;
 
         spin_lock_init(&cwq->lock);
         cwq->wq = wq;
@@ -263,17 +252,13 @@ static int create_workqueue_thread(struct workqueue_struct *wq,
         INIT_LIST_HEAD(&cwq->worklist);
         init_waitqueue_head(&cwq->more_work);
         init_waitqueue_head(&cwq->work_done);
-        init_completion(&cwq->exit);
-
-        init_completion(&startup.done);
-        startup.cwq = cwq;
-        startup.name = name;
-        ret = kernel_thread(worker_thread, &startup, CLONE_FS | CLONE_FILES);
-        if (ret >= 0) {
-                wait_for_completion(&startup.done);
-                BUG_ON(!cwq->thread);
-        }
-        return ret;
+
+        p = kthread_create(worker_thread, cwq, "%s/%d", name, cpu);
+        if (IS_ERR(p))
+                return PTR_ERR(p);
+        cwq->thread = p;
+        kthread_bind(p, cpu);
+        return 0;
 }
 
 struct workqueue_struct *create_workqueue(const char *name)
@@ -292,6 +277,8 @@ struct workqueue_struct *create_workqueue(const char *name)
                         continue;
                 if (create_workqueue_thread(wq, name, cpu) < 0)
                         destroy = 1;
+                else
+                        wake_up_process(wq->cpu_wq[cpu].thread);
         }
         /*
          * Was there any error during startup? If yes then clean up:
@@ -308,13 +295,8 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
         struct cpu_workqueue_struct *cwq;
 
         cwq = wq->cpu_wq + cpu;
-        if (cwq->thread) {
-                /* Tell thread to exit and wait for it. */
-                cwq->thread = NULL;
-                wake_up(&cwq->more_work);
-
-                wait_for_completion(&cwq->exit);
-        }
+        if (cwq->thread)
+                kthread_stop(cwq->thread);
 }
 
 void destroy_workqueue(struct workqueue_struct *wq)
@@ -347,6 +329,11 @@ void flush_scheduled_work(void)
         flush_workqueue(keventd_wq);
 }
 
+int keventd_up(void)
+{
+        return keventd_wq != NULL;
+}
+
 int current_is_keventd(void)
 {
         struct cpu_workqueue_struct *cwq;
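A note on the keventd_up() export above: kthread normally hands actual thread creation to keventd so the new thread inherits a clean environment, but keventd itself does not exist during early boot. Below is a rough sketch of the fallback pattern the commit message describes; the real logic lands in kthread.c in the follow-on patch, and launch_kthread()/create_kthread() here are hypothetical names.

#include <linux/workqueue.h>

static void create_kthread(void *data)          /* normally runs in keventd */
{
        /* ... fork and set up the new kernel thread from 'data' ... */
}

static void launch_kthread(void *request)
{
        DECLARE_WORK(work, create_kthread, request);

        if (!keventd_up() || current_is_keventd())
                /* Too early in boot (or already in keventd): call directly. */
                work.func(work.data);
        else
                /* Normal case: run in keventd for a clean environment; real
                 * code would wait on a completion before 'work' leaves scope. */
                schedule_work(&work);
}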