From 72785ef74af0ec8afc1c344981ca9ba609796f5d Mon Sep 17 00:00:00 2001
From: Bob Miller
Date: Sun, 14 Jul 2002 03:36:55 -0700
Subject: [PATCH] 2.5.25 remove global semaphore_lock spin lock.

Replace the global semaphore_lock with the spinlock embedded in the
wait_queue_head_t.  None of the data protected by semaphore_lock is
global, and there is no need to restrict the system to dealing with
only one semaphore at a time.

This removes 2 lock round trips from __down() and
__down_interruptible().  It also reduces the number of cache lines
touched by 1 (the cache line with semaphore_lock).
---
 kernel/sched.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 49563c9c45d9..c8169afb4b1f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -928,7 +928,7 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp, *next;
 
@@ -956,6 +956,14 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+{
+	__wake_up_common(q, mode, 1, 0);
+}
+
 #if CONFIG_SMP
 
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
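
Editor's note: the sketch below shows why __wake_up_locked() exists.  It
is not code from this commit; the struct semaphore layout and the
__down() slow path are assumptions modeled on the classic counting
semaphore of the 2.4/2.5 era, simplified and with error handling
omitted.

/*
 * Illustrative sketch only: a __down()-style slow path that serializes
 * on the spinlock embedded in the semaphore's wait_queue_head_t
 * (sem->wait.lock) instead of a single global semaphore_lock.
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

void __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Per-semaphore lock: contention on one semaphore no longer
	 * serializes operations on every other semaphore in the system.
	 */
	spin_lock_irqsave(&sem->wait.lock, flags);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Fold the other sleepers' decrements back into count;
		 * if count is still negative we really must sleep.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */

		spin_unlock_irqrestore(&sem->wait.lock, flags);
		schedule();
		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}

	/*
	 * We still hold sem->wait.lock, so we can dequeue ourselves and
	 * wake the next exclusive waiter through the new locked entry
	 * point without dropping and retaking the lock.
	 */
	__remove_wait_queue(&sem->wait, &wait);
	__wake_up_locked(&sem->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}

With the old global semaphore_lock, this tail had to unlock
semaphore_lock, then call remove_wait_queue() and wake_up(), each of
which acquired and released the wait-queue lock again: those are the 2
lock round trips the patch removes from __down() and
__down_interruptible().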