diff options

author:    Andrew Morton <akpm@osdl.org>                  2004-05-14 05:42:14 -0700
committer: Linus Torvalds <torvalds@ppc970.osdl.org>      2004-05-14 05:42:14 -0700
commit:    2afafa3b9804e7a8a515c528270121c5349dbf53 (patch)
tree:      aaa3bf25f50fa1c68dbac3fd57997835dd9442ae
parent:    2f24285400352ec3b832db6de3644241ee7c2c84 (diff)
[PATCH] filtered wakeups: wakeup enhancements
From: William Lee Irwin III <wli@holomorphy.com>
This patch provides an additional argument to __wake_up_common() so that the
information wakefunc.patch made waiters ready to receive may be passed to them
by wakers. This is provided as a separate patch so that the overhead of the
additional argument to __wake_up_common() can be measured in isolation. No
change in performance was observable here.
 include/linux/wait.h | 14 +++++++-------
 kernel/sched.c       | 17 +++++++++--------
 2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b377ae40e509..52edb1786b14 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -104,17 +104,17 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 	list_del(&old->task_list);
 }
 
-extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 
-#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
-#define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
-#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
+#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
 #define wake_up_all_sync(x)		__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
-#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
-#define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
-#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
+#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
diff --git a/kernel/sched.c b/kernel/sched.c
index bf9903af64c1..e146ee030888 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2320,7 +2320,7 @@ EXPORT_SYMBOL(default_wake_function);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync)
+			     int nr_exclusive, int sync, void *key)
 {
 	struct list_head *tmp, *next;
 
@@ -2329,7 +2329,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
-		if (curr->func(curr, mode, sync, NULL) &&
+		if (curr->func(curr, mode, sync, key) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
 			break;
@@ -2342,12 +2342,13 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  */
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -2358,7 +2359,7 @@ EXPORT_SYMBOL(__wake_up);
  */
 void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
 /**
@@ -2386,7 +2387,7 @@ void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exc
 		sync = 0;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, sync);
+	__wake_up_common(q, mode, nr_exclusive, sync, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
@@ -2398,7 +2399,7 @@ void fastcall complete(struct completion *x)
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
 	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0);
+			 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
 
@@ -2410,7 +2411,7 @@ void fastcall complete_all(struct completion *x)
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
 	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0);
+			 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
