Diffstat (limited to 'kernel')

-rw-r--r--  kernel/Makefile    |   4
-rw-r--r--  kernel/context.c   | 221
-rw-r--r--  kernel/kmod.c      |  15
-rw-r--r--  kernel/sys.c       |   8
-rw-r--r--  kernel/workqueue.c | 387

5 files changed, 398 insertions, 237 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 3f36152dac1b..b3fce6d3ac9c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,13 +2,13 @@
 # Makefile for the linux kernel.
 #
 
-export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o \
+export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \
         printk.o platform.o suspend.o dma.o module.o cpufreq.o
 
 obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
             module.o exit.o itimer.o time.o softirq.o resource.o \
             sysctl.o capability.o ptrace.o timer.o user.o \
-            signal.o sys.o kmod.o context.o futex.o platform.o pid.o
+            signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o
 
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o
diff --git a/kernel/context.c b/kernel/context.c
deleted file mode 100644
index ae836aaf2ddf..000000000000
--- a/kernel/context.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * linux/kernel/context.c
- *
- * Mechanism for running arbitrary tasks in process context
- *
- * dwmw2@redhat.com: Genesis
- *
- * andrewm@uow.edu.au: 2.4.0-test12
- *   - Child reaping
- *   - Support for tasks which re-add themselves
- *   - flush_scheduled_tasks.
- */
-
-#define __KERNEL_SYSCALLS__
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/unistd.h>
-#include <linux/signal.h>
-#include <linux/completion.h>
-#include <linux/tqueue.h>
-
-static DECLARE_TASK_QUEUE(tq_context);
-static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
-static DECLARE_WAIT_QUEUE_HEAD(context_task_done);
-static int keventd_running;
-static struct task_struct *keventd_task;
-
-static spinlock_t tqueue_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-
-typedef struct list_head task_queue;
-
-/*
- * Queue a task on a tq. Return non-zero if it was successfully
- * added.
- */
-static inline int queue_task(struct tq_struct *tq, task_queue *list)
-{
-        int ret = 0;
-        unsigned long flags;
-
-        if (!test_and_set_bit(0, &tq->sync)) {
-                spin_lock_irqsave(&tqueue_lock, flags);
-                list_add_tail(&tq->list, list);
-                spin_unlock_irqrestore(&tqueue_lock, flags);
-                ret = 1;
-        }
-        return ret;
-}
-
-#define TQ_ACTIVE(q) (!list_empty(&q))
-
-static inline void run_task_queue(task_queue *list)
-{
-        struct list_head head, *next;
-        unsigned long flags;
-
-        if (!TQ_ACTIVE(*list))
-                return;
-
-        spin_lock_irqsave(&tqueue_lock, flags);
-        list_add(&head, list);
-        list_del_init(list);
-        spin_unlock_irqrestore(&tqueue_lock, flags);
-
-        next = head.next;
-        while (next != &head) {
-                void (*f) (void *);
-                struct tq_struct *p;
-                void *data;
-
-                p = list_entry(next, struct tq_struct, list);
-                next = next->next;
-                f = p->routine;
-                data = p->data;
-                wmb();
-                p->sync = 0;
-                if (f)
-                        f(data);
-        }
-}
-
-static int need_keventd(const char *who)
-{
-        if (keventd_running == 0)
-                printk(KERN_ERR "%s(): keventd has not started\n", who);
-        return keventd_running;
-}
-
-int current_is_keventd(void)
-{
-        int ret = 0;
-        if (need_keventd(__FUNCTION__))
-                ret = (current == keventd_task);
-        return ret;
-}
-
-/**
- * schedule_task - schedule a function for subsequent execution in process context.
- * @task: pointer to a &tq_struct which defines the function to be scheduled.
- *
- * May be called from interrupt context. The scheduled function is run at some
- * time in the near future by the keventd kernel thread. If it can sleep, it
- * should be designed to do so for the minimum possible time, as it will be
- * stalling all other scheduled tasks.
- *
- * schedule_task() returns non-zero if the task was successfully scheduled.
- * If @task is already residing on a task queue then schedule_task() fails
- * to schedule your task and returns zero.
- */
-int schedule_task(struct tq_struct *task)
-{
-        int ret;
-        need_keventd(__FUNCTION__);
-        ret = queue_task(task, &tq_context);
-        wake_up(&context_task_wq);
-        return ret;
-}
-
-static int context_thread(void *startup)
-{
-        struct task_struct *curtask = current;
-        DECLARE_WAITQUEUE(wait, curtask);
-        struct k_sigaction sa;
-
-        daemonize();
-        strcpy(curtask->comm, "keventd");
-        current->flags |= PF_IOTHREAD;
-        keventd_running = 1;
-        keventd_task = curtask;
-
-        spin_lock_irq(&curtask->sig->siglock);
-        siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
-        recalc_sigpending();
-        spin_unlock_irq(&curtask->sig->siglock);
-
-        complete((struct completion *)startup);
-
-        /* Install a handler so SIGCLD is delivered */
-        sa.sa.sa_handler = SIG_IGN;
-        sa.sa.sa_flags = 0;
-        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
-        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
-
-        /*
-         * If one of the functions on a task queue re-adds itself
-         * to the task queue we call schedule() in state TASK_RUNNING
-         */
-        for (;;) {
-                set_task_state(curtask, TASK_INTERRUPTIBLE);
-                add_wait_queue(&context_task_wq, &wait);
-                if (TQ_ACTIVE(tq_context))
-                        set_task_state(curtask, TASK_RUNNING);
-                schedule();
-                remove_wait_queue(&context_task_wq, &wait);
-                run_task_queue(&tq_context);
-                wake_up(&context_task_done);
-                if (signal_pending(curtask)) {
-                        while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
-                                ;
-                        spin_lock_irq(&curtask->sig->siglock);
-                        flush_signals(curtask);
-                        recalc_sigpending();
-                        spin_unlock_irq(&curtask->sig->siglock);
-                }
-        }
-}
-
-/**
- * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
- *
- * Forces execution of the schedule_task() queue and blocks until its completion.
- *
- * If a kernel subsystem uses schedule_task() and wishes to flush any pending
- * tasks, it should use this function. This is typically used in driver shutdown
- * handlers.
- *
- * The caller should hold no spinlocks and should hold no semaphores which could
- * cause the scheduled tasks to block.
- */
-static struct tq_struct dummy_task;
-
-void flush_scheduled_tasks(void)
-{
-        int count;
-        DECLARE_WAITQUEUE(wait, current);
-
-        /*
-         * Do it twice. It's possible, albeit highly unlikely, that
-         * the caller queued a task immediately before calling us,
-         * and that the eventd thread was already past the run_task_queue()
-         * but not yet into wake_up(), so it woke us up before completing
-         * the caller's queued task or our new dummy task.
-         */
-        add_wait_queue(&context_task_done, &wait);
-        for (count = 0; count < 2; count++) {
-                set_current_state(TASK_UNINTERRUPTIBLE);
-
-                /* Queue a dummy task to make sure we get kicked */
-                schedule_task(&dummy_task);
-
-                /* Wait for it to complete */
-                schedule();
-        }
-        remove_wait_queue(&context_task_done, &wait);
-}
-
-int start_context_thread(void)
-{
-        static struct completion startup __initdata = COMPLETION_INITIALIZER(startup);
-
-        kernel_thread(context_thread, &startup, CLONE_FS | CLONE_FILES);
-        wait_for_completion(&startup);
-        return 0;
-}
-
-EXPORT_SYMBOL(schedule_task);
-EXPORT_SYMBOL(flush_scheduled_tasks);
-
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b4abfdfe9888..755e5807e815 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -28,7 +28,7 @@
 #include <linux/namespace.h>
 #include <linux/completion.h>
 #include <linux/file.h>
-#include <linux/tqueue.h>
+#include <linux/workqueue.h>
 
 #include <asm/uaccess.h>
 
@@ -346,18 +346,15 @@ static void __call_usermodehelper(void *data)
  */
 int call_usermodehelper(char *path, char **argv, char **envp)
 {
-        DECLARE_COMPLETION(work);
+        DECLARE_COMPLETION(done);
         struct subprocess_info sub_info = {
-                .complete = &work,
+                .complete = &done,
                 .path     = path,
                 .argv     = argv,
                 .envp     = envp,
                 .retval   = 0,
         };
-        struct tq_struct tqs = {
-                .routine = __call_usermodehelper,
-                .data    = &sub_info,
-        };
+        DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
         if (!system_running)
                 return -EBUSY;
@@ -369,8 +366,8 @@ int call_usermodehelper(char *path, char **argv, char **envp)
                 /* We can't wait on keventd! */
                 __call_usermodehelper(&sub_info);
         } else {
-                schedule_task(&tqs);
-                wait_for_completion(&work);
+                schedule_work(&work);
+                wait_for_completion(&done);
         }
 out:
         return sub_info.retval;
diff --git a/kernel/sys.c b/kernel/sys.c
index 636a5e6b6768..5b7e84384cfa 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
-#include <linux/tqueue.h>
+#include <linux/workqueue.h>
 #include <linux/device.h>
 #include <linux/times.h>
 #include <linux/security.h>
@@ -442,12 +442,10 @@ static void deferred_cad(void *dummy)
  */
 void ctrl_alt_del(void)
 {
-        static struct tq_struct cad_tq = {
-                .routine = deferred_cad,
-        };
+        static DECLARE_WORK(cad_work, deferred_cad, NULL);
 
         if (C_A_D)
-                schedule_task(&cad_tq);
+                schedule_work(&cad_work);
         else
                 kill_proc(cad_pid, SIGINT, 1);
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
new file mode 100644
index 000000000000..5fab2cd1933f
--- /dev/null
+++ b/kernel/workqueue.c
@@ -0,0 +1,387 @@
+/*
+ * linux/kernel/workqueue.c
+ *
+ * Generic mechanism for defining kernel helper threads for running
+ * arbitrary tasks in process context.
+ *
+ * Started by Ingo Molnar, Copyright (C) 2002
+ *
+ * Derived from the taskqueue/keventd code by:
+ *
+ *   David Woodhouse <dwmw2@redhat.com>
+ *   Andrew Morton <andrewm@uow.edu.au>
+ *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *   Theodore Ts'o <tytso@mit.edu>
+ */
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/unistd.h>
+#include <linux/signal.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+/*
+ * The per-CPU workqueue:
+ */
+struct cpu_workqueue_struct {
+
+        spinlock_t lock;
+
+        atomic_t nr_queued;
+        struct list_head worklist;
+        wait_queue_head_t more_work;
+        wait_queue_head_t work_done;
+
+        struct workqueue_struct *wq;
+        task_t *thread;
+        struct completion exit;
+
+} ____cacheline_aligned;
+
+/*
+ * The externally visible workqueue abstraction is an array of
+ * per-CPU workqueues:
+ */
+struct workqueue_struct {
+        struct cpu_workqueue_struct cpu_wq[NR_CPUS];
+};
+
+/*
+ * Queue work on a workqueue. Return non-zero if it was successfully
+ * added.
+ *
+ * We queue the work to the CPU it was submitted, but there is no
+ * guarantee that it will be processed by that CPU.
+ */
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+        unsigned long flags;
+        int ret = 0, cpu = get_cpu();
+        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
+
+        if (!test_and_set_bit(0, &work->pending)) {
+                BUG_ON(!list_empty(&work->entry));
+                work->wq_data = cwq;
+
+                spin_lock_irqsave(&cwq->lock, flags);
+                list_add_tail(&work->entry, &cwq->worklist);
+                atomic_inc(&cwq->nr_queued);
+                spin_unlock_irqrestore(&cwq->lock, flags);
+
+                wake_up(&cwq->more_work);
+                ret = 1;
+        }
+        put_cpu();
+        return ret;
+}
+
+static void delayed_work_timer_fn(unsigned long __data)
+{
+        struct work_struct *work = (struct work_struct *)__data;
+        struct cpu_workqueue_struct *cwq = work->wq_data;
+        unsigned long flags;
+
+        /*
+         * Do the wakeup within the spinlock, so that flushing
+         * can be done in a guaranteed way.
+         */
+        spin_lock_irqsave(&cwq->lock, flags);
+        list_add_tail(&work->entry, &cwq->worklist);
+        wake_up(&cwq->more_work);
+        spin_unlock_irqrestore(&cwq->lock, flags);
+}
+
+int queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)
+{
+        int ret = 0, cpu = get_cpu();
+        timer_t *timer = &work->timer;
+        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
+
+        if (!test_and_set_bit(0, &work->pending)) {
+                BUG_ON(timer_pending(timer));
+                BUG_ON(!list_empty(&work->entry));
+
+                /*
+                 * Increase nr_queued so that the flush function
+                 * knows that there's something pending.
+                 */
+                atomic_inc(&cwq->nr_queued);
+                work->wq_data = cwq;
+
+                timer->expires = jiffies + delay;
+                timer->data = (unsigned long)work;
+                timer->function = delayed_work_timer_fn;
+                add_timer(timer);
+
+                ret = 1;
+        }
+        put_cpu();
+        return ret;
+}
+
+static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
+{
+        unsigned long flags;
+
+        /*
+         * Keep taking off work from the queue until
+         * done.
+         */
+        spin_lock_irqsave(&cwq->lock, flags);
+        while (!list_empty(&cwq->worklist)) {
+                struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry);
+                void (*f) (void *) = work->func;
+                void *data = work->data;
+
+                list_del_init(cwq->worklist.next);
+                spin_unlock_irqrestore(&cwq->lock, flags);
+
+                BUG_ON(work->wq_data != cwq);
+                clear_bit(0, &work->pending);
+                f(data);
+
+                /*
+                 * We only wake up 'work done' waiters (flush) when
+                 * the last function has been fully processed.
+                 */
+                if (atomic_dec_and_test(&cwq->nr_queued))
+                        wake_up(&cwq->work_done);
+
+                spin_lock_irqsave(&cwq->lock, flags);
+        }
+        spin_unlock_irqrestore(&cwq->lock, flags);
+}
+
+typedef struct startup_s {
+        struct cpu_workqueue_struct *cwq;
+        struct completion done;
+        const char *name;
+} startup_t;
+
+static int worker_thread(void *__startup)
+{
+        startup_t *startup = __startup;
+        struct cpu_workqueue_struct *cwq = startup->cwq;
+        int cpu = cwq - cwq->wq->cpu_wq;
+        DECLARE_WAITQUEUE(wait, current);
+        struct k_sigaction sa;
+
+        daemonize();
+        sprintf(current->comm, "%s/%d", startup->name, cpu);
+        current->flags |= PF_IOTHREAD;
+        cwq->thread = current;
+
+        set_cpus_allowed(current, 1UL << cpu);
+
+        spin_lock_irq(&current->sig->siglock);
+        siginitsetinv(&current->blocked, sigmask(SIGCHLD));
+        recalc_sigpending();
+        spin_unlock_irq(&current->sig->siglock);
+
+        complete(&startup->done);
+
+        /* Install a handler so SIGCLD is delivered */
+        sa.sa.sa_handler = SIG_IGN;
+        sa.sa.sa_flags = 0;
+        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
+        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
+
+        for (;;) {
+                set_task_state(current, TASK_INTERRUPTIBLE);
+
+                add_wait_queue(&cwq->more_work, &wait);
+                if (!cwq->thread)
+                        break;
+                if (list_empty(&cwq->worklist))
+                        schedule();
+                else
+                        set_task_state(current, TASK_RUNNING);
+                remove_wait_queue(&cwq->more_work, &wait);
+
+                if (!list_empty(&cwq->worklist))
+                        run_workqueue(cwq);
+
+                if (signal_pending(current)) {
+                        while (waitpid(-1, NULL, __WALL|WNOHANG) > 0)
+                                /* SIGCHLD - auto-reaping */ ;
+
+                        /* zap all other signals */
+                        spin_lock_irq(&current->sig->siglock);
+                        flush_signals(current);
+                        recalc_sigpending();
+                        spin_unlock_irq(&current->sig->siglock);
+                }
+        }
+        remove_wait_queue(&cwq->more_work, &wait);
+        complete(&cwq->exit);
+
+        return 0;
+}
+
+/*
+ * flush_workqueue - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the workqueue and blocks until its completion.
+ * This is typically used in driver shutdown handlers.
+ *
+ * NOTE: if work is being added to the queue constantly by some other
+ * context then this function might block indefinitely.
+ */
+void flush_workqueue(struct workqueue_struct *wq)
+{
+        struct cpu_workqueue_struct *cwq;
+        int cpu;
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                if (!cpu_online(cpu))
+                        continue;
+                cwq = wq->cpu_wq + cpu;
+
+                if (atomic_read(&cwq->nr_queued)) {
+                        DECLARE_WAITQUEUE(wait, current);
+
+                        if (!list_empty(&cwq->worklist))
+                                run_workqueue(cwq);
+
+                        /*
+                         * Wait for helper thread(s) to finish up
+                         * the queue:
+                         */
+                        set_task_state(current, TASK_INTERRUPTIBLE);
+                        add_wait_queue(&cwq->work_done, &wait);
+                        if (atomic_read(&cwq->nr_queued))
+                                schedule();
+                        else
+                                set_task_state(current, TASK_RUNNING);
+                        remove_wait_queue(&cwq->work_done, &wait);
+                }
+        }
+}
+
+struct workqueue_struct *create_workqueue(const char *name)
+{
+        int ret, cpu, destroy = 0;
+        struct cpu_workqueue_struct *cwq;
+        startup_t startup;
+        struct workqueue_struct *wq;
+
+        BUG_ON(strlen(name) > 10);
+        startup.name = name;
+
+        wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+        if (!wq)
+                return NULL;
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                if (!cpu_online(cpu))
+                        continue;
+                cwq = wq->cpu_wq + cpu;
+
+                spin_lock_init(&cwq->lock);
+                cwq->wq = wq;
+                cwq->thread = NULL;
+                atomic_set(&cwq->nr_queued, 0);
+                INIT_LIST_HEAD(&cwq->worklist);
+                init_waitqueue_head(&cwq->more_work);
+                init_waitqueue_head(&cwq->work_done);
+
+                init_completion(&startup.done);
+                startup.cwq = cwq;
+                ret = kernel_thread(worker_thread, &startup,
+                                    CLONE_FS | CLONE_FILES);
+                if (ret < 0)
+                        destroy = 1;
+                else {
+                        wait_for_completion(&startup.done);
+                        BUG_ON(!cwq->thread);
+                }
+        }
+        /*
+         * Was there any error during startup? If yes then clean up:
+         */
+        if (destroy) {
+                destroy_workqueue(wq);
+                wq = NULL;
+        }
+        return wq;
+}
+
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+        struct cpu_workqueue_struct *cwq;
+        int cpu;
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                if (!cpu_online(cpu))
+                        continue;
+                cwq = wq->cpu_wq + cpu;
+                if (!cwq->thread)
+                        continue;
+                /*
+                 * Initiate an exit and wait for it:
+                 */
+                init_completion(&cwq->exit);
+                cwq->thread = NULL;
+                wake_up(&cwq->more_work);
+
+                wait_for_completion(&cwq->exit);
+        }
+        kfree(wq);
+}
+
+static struct workqueue_struct *keventd_wq;
+
+int schedule_work(struct work_struct *work)
+{
+        return queue_work(keventd_wq, work);
+}
+
+int schedule_delayed_work(struct work_struct *work, unsigned long delay)
+{
+        return queue_delayed_work(keventd_wq, work, delay);
+}
+
+void flush_scheduled_work(void)
+{
+        flush_workqueue(keventd_wq);
+}
+
+int current_is_keventd(void)
+{
+        struct cpu_workqueue_struct *cwq;
+        int cpu;
+
+        BUG_ON(!keventd_wq);
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                if (!cpu_online(cpu))
+                        continue;
+                cwq = keventd_wq->cpu_wq + cpu;
+                if (current == cwq->thread)
+                        return 1;
+        }
+        return 0;
+}
+
+void init_workqueues(void)
+{
+        keventd_wq = create_workqueue("events");
+        BUG_ON(!keventd_wq);
+}
+
+EXPORT_SYMBOL_GPL(create_workqueue);
+EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+EXPORT_SYMBOL_GPL(flush_workqueue);
+EXPORT_SYMBOL_GPL(destroy_workqueue);
+
+EXPORT_SYMBOL(schedule_work);
+EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(flush_scheduled_work);
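
A minimal usage sketch of the interface this patch introduces (illustrative only, not part of the commit): a hypothetical driver "foo" creates a private queue, submits immediate and delayed work, and flushes on teardown. All "foo" names are invented; the calls and the three-argument DECLARE_WORK() form follow the kmod.c and sys.c conversions above.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;    /* hypothetical private queue */

/* Runs in process context in a "foo/<cpu>" worker thread; it may sleep. */
static void foo_handler(void *data)
{
        /* ... deferred driver work ... */
}

static DECLARE_WORK(foo_work, foo_handler, NULL);
static DECLARE_WORK(foo_poll, foo_handler, NULL);

static int __init foo_init(void)
{
        /* One worker thread per online CPU; note create_workqueue()
         * rejects names longer than 10 characters (BUG_ON above). */
        foo_wq = create_workqueue("foo");
        if (!foo_wq)
                return -ENOMEM;

        queue_work(foo_wq, &foo_work);                /* run as soon as possible */
        queue_delayed_work(foo_wq, &foo_poll, HZ);    /* run about a second from now */
        return 0;
}

static void __exit foo_exit(void)
{
        /* flush_workqueue() also waits for armed delayed work, since
         * queue_delayed_work() bumps nr_queued when the timer is set. */
        flush_workqueue(foo_wq);
        destroy_workqueue(foo_wq);
}

module_init(foo_init);
module_exit(foo_exit);

Code that previously used schedule_task()/flush_scheduled_tasks() on keventd needs no private queue: the schedule_work(), schedule_delayed_work() and flush_scheduled_work() wrappers queue onto the shared "events" workqueue created by init_workqueues(), exactly as the kmod.c and sys.c hunks above do.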