| field | value | date |
|---|---|---|
| author | Bob Miller <rem@osdl.org> | 2002-07-14 03:36:55 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-14 03:36:55 -0700 |
| commit | 72785ef74af0ec8afc1c344981ca9ba609796f5d | |
| tree | a34322490491917840e0dcbd473953b2cc839040 | |
| parent | 5150c802ecfb2e2779cb96296399d25ad6e33e4a | |
[PATCH] 2.5.25 remove global semaphore_lock spin lock.
Replace the global semaphore_lock with the spinlock embedded in
the wait_queue_head_t. None of the data protected by semaphore_lock
is global and there is no need to restrict the system to only allow
one semaphore to be dealt with at a time.
This removes 2 lock round trips from __down() and __down_interruptible().
It also reduces the number of cache lines touched by 1 (the cache line
with semaphore_lock).
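
For readers who want the idea in isolation, the sketch below is a minimal userspace analogue of the change, not kernel code. The names `my_sem`, `down_global`, and `down_per_object`, and the use of POSIX mutexes, are illustrative assumptions; the point is only that replacing one global lock with a lock embedded in each object lets operations on different objects proceed without contending on the same lock and cache line.

```c
/*
 * Minimal userspace sketch of the locking change, NOT kernel code.
 * "my_sem", "down_global", and "down_per_object" are hypothetical names
 * chosen for illustration; the embedded pthread mutex stands in for the
 * spinlock inside wait_queue_head_t.
 */
#include <pthread.h>
#include <stdio.h>

struct my_sem {
	int count;
	pthread_mutex_t lock;		/* per-object lock, like sem->wait.lock */
};

/* Before the patch: a single global lock serialized *all* semaphores. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static void down_global(struct my_sem *sem)
{
	pthread_mutex_lock(&global_lock);	/* contends across every semaphore */
	sem->count--;
	pthread_mutex_unlock(&global_lock);
}

/* After the patch: each semaphore is protected by its own embedded lock. */
static void down_per_object(struct my_sem *sem)
{
	pthread_mutex_lock(&sem->lock);		/* contends only on this semaphore */
	sem->count--;
	pthread_mutex_unlock(&sem->lock);
}

static struct my_sem a = { 1, PTHREAD_MUTEX_INITIALIZER };
static struct my_sem b = { 1, PTHREAD_MUTEX_INITIALIZER };

int main(void)
{
	down_global(&a);	/* threads touching a and b would serialize here */
	down_global(&b);

	down_per_object(&a);	/* threads touching a and b stay independent here */
	down_per_object(&b);

	printf("a.count=%d b.count=%d\n", a.count, b.count);
	return 0;
}
```

Build with `cc -pthread`; the two `down_per_object()` calls never touch a shared lock, which is the contention the patch removes.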
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/i386/kernel/semaphore.c | 64 |
| -rw-r--r-- | include/linux/sched.h | 21 |
| -rw-r--r-- | kernel/sched.c | 10 |

3 files changed, 66 insertions, 29 deletions
```diff
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 06bdf8a44986..e015553534fc 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -28,8 +28,8 @@
  * needs to do something only if count was negative before
  * the increment operation.
  *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
  *
  * Note that these functions are only called when there is
  * contention on the lock, and as such all this is the
@@ -53,39 +53,41 @@ void __up(struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
 void __down(struct semaphore * sem)
 {
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
 }
 
 int __down_interruptible(struct semaphore * sem)
@@ -93,11 +95,13 @@ int __down_interruptible(struct semaphore * sem)
 	int retval = 0;
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
+	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
@@ -117,25 +121,27 @@ int __down_interruptible(struct semaphore * sem)
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
 	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
 	return retval;
 }
 
@@ -152,18 +158,20 @@ int __down_trylock(struct semaphore * sem)
 	int sleepers;
 	unsigned long flags;
 
-	spin_lock_irqsave(&semaphore_lock, flags);
+	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
 	/*
 	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
 
-	spin_unlock_irqrestore(&semaphore_lock, flags);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41382c9f8653..2e38ae05d9d2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -487,6 +487,7 @@ extern unsigned long prof_len;
 extern unsigned long prof_shift;
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
@@ -504,6 +505,7 @@ extern void FASTCALL(sched_exit(task_t * p));
 #define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
 #define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #ifdef CONFIG_SMP
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
 #else
@@ -696,6 +698,25 @@ do { \
 	remove_wait_queue(&wq, &__wait);	\
 } while (0)
 
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
+						   wait_queue_t * wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
+/*
+ * Must be called with the spinlock in the wait_queue_head_t held.
+ */
+static inline void remove_wait_queue_locked(wait_queue_head_t *q,
+					    wait_queue_t * wait)
+{
+	__remove_wait_queue(q, wait);
+}
+
 #define wait_event_interruptible(wq, condition)	\
 ({	\
 	int __ret = 0;	\
diff --git a/kernel/sched.c b/kernel/sched.c
index 49563c9c45d9..c8169afb4b1f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -928,7 +928,7 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp, *next;
 
@@ -956,6 +956,14 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+{
+	__wake_up_common(q, mode, 1, 0);
+}
+
 #if CONFIG_SMP
 
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
```
