| field | value | date |
|---|---|---|
| author | Bob Miller <rem@osdl.org> | 2002-07-14 03:36:55 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-14 03:36:55 -0700 |
| commit | 72785ef74af0ec8afc1c344981ca9ba609796f5d | |
| tree | a34322490491917840e0dcbd473953b2cc839040 /kernel | |
| parent | 5150c802ecfb2e2779cb96296399d25ad6e33e4a | |
[PATCH] 2.5.25 remove global semaphore_lock spin lock.
Replace the global semaphore_lock with the spinlock embedded in
the wait_queue_head_t. None of the data protected by semaphore_lock
is global, so there is no need to restrict the system to operating
on only one semaphore at a time.
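For reference, the embedded lock in question: in kernels of this era, wait_queue_head_t looks roughly like the sketch below (reconstructed from the 2.5-era linux/wait.h, so treat the exact layout as an assumption; the per-queue `lock` field is what the diff below uses as q->lock).

```c
/* Sketch of the 2.5-era wait queue head. The spinlock the patch reuses
 * is this per-queue `lock` field, not a global. */
struct __wait_queue_head {
	spinlock_t		lock;		/* guards task_list */
	struct list_head	task_list;	/* list of waiters */
};
typedef struct __wait_queue_head wait_queue_head_t;
```

Since every semaphore owns its own wait_queue_head_t, taking q->lock serializes only that semaphore's waiters rather than every semaphore in the system.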
This removes 2 lock round trips from __down() and __down_interruptible().
It also reduces the number of cache lines touched by 1 (the cache line
holding semaphore_lock).
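To make the round/trip claim concrete, here is a condensed before/after sketch of the semaphore slow path. This is illustrative only: the real __down() lives in arch code outside the kernel/-filtered diff below, the sleepers bookkeeping is elided into comments, and the `__`-prefixed helpers are assumed to be the non-locking counterparts of the locking ones.

```c
/* Before: each helper takes and drops sem->wait.lock internally, and the
 * global semaphore_lock is taken and dropped around the sleepers logic. */
void __down_before(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->wait, &wait);	/* locks/unlocks sem->wait.lock */
	spin_lock_irq(&semaphore_lock);			/* global: serializes ALL semaphores */
	/* ... sem->sleepers bookkeeping and the schedule() loop ... */
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);		/* locks/unlocks sem->wait.lock again */
	wake_up(&sem->wait);				/* and once more */
}

/* After: a single hold of sem->wait.lock covers queueing, bookkeeping, and
 * the wake-up (via the new __wake_up_locked), and only this semaphore's
 * waiters are serialized. */
void __down_after(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	__add_wait_queue_tail(&sem->wait, &wait);	/* lock already held */
	/* ... bookkeeping; the lock is dropped only around schedule() ... */
	__remove_wait_queue(&sem->wait, &wait);
	__wake_up_locked(&sem->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
}
```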
Diffstat (limited to 'kernel')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 10 |

1 file changed, 9 insertions(+), 1 deletion(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 49563c9c45d9..c8169afb4b1f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -928,7 +928,7 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp, *next;
 
@@ -956,6 +956,14 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+{
+	__wake_up_common(q, mode, 1, 0);
+}
+
 #if CONFIG_SMP
 
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
```
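The contract of the new entry point is that the caller already holds q->lock, so __wake_up_locked() must skip the lock/unlock pair that __wake_up() performs. A minimal usage sketch follows (hypothetical caller, not from the patch; the mode mask is the conventional pair that wakes both sleep states):

```c
/* Hypothetical caller: update state and wake a waiter under one hold
 * of q->lock, instead of unlocking first and then calling wake_up(). */
static void set_flag_and_wake(wait_queue_head_t *q, int *flag)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	*flag = 1;	/* state guarded by q->lock */
	__wake_up_locked(q, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&q->lock, flags);
}
```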
