|  |  |  |
|---|---|---|
| author | David Howells <dhowells@redhat.com> | 2002-07-24 18:52:25 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-07-24 18:52:25 -0700 |
| commit | 866b413df71523a4c08c2d84b33ed6001c645eb3 (patch) | |
| tree | 07f46493b0039c1dc1f81e060a547e0f3809ab4b /lib | |
| parent | 8b8c90a3695ccbc9b7e3f56bdb7060956da52ff8 (diff) | |
[PATCH] read-write semaphore downgrade and trylock
Here's a patch from Christoph Hellwig and myself to supply write->read
semaphore downgrade, and also from Brian Watson to supply trylock for rwsems.
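As a usage illustration (not part of the patch): the slow paths added below back the downgrade_write() and trylock entry points that callers use. A minimal sketch, assuming the companion header changes that expose downgrade_write() and down_write_trylock(); my_sem, update_then_scan and try_update are hypothetical names:

```c
#include <linux/rwsem.h>
#include <linux/errno.h>

static DECLARE_RWSEM(my_sem);	/* hypothetical rwsem for illustration */

static void update_then_scan(void)
{
	down_write(&my_sem);		/* exclusive access while modifying */
	/* ... update the shared data ... */
	downgrade_write(&my_sem);	/* atomically become a reader: queued
					 * readers may be woken, writers not */
	/* ... keep reading alongside the woken readers ... */
	up_read(&my_sem);		/* released as a read lock now */
}

static int try_update(void)
{
	if (!down_write_trylock(&my_sem))
		return -EAGAIN;		/* contended: caller backs off */
	/* ... brief exclusive work ... */
	up_write(&my_sem);
	return 0;
}
```

The point of downgrading rather than doing up_write() followed by down_read() is atomicity: no writer can slip in between the two steps, and readers already queued behind the writer are woken immediately.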
Diffstat (limited to 'lib')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | lib/rwsem.c | 37 |

1 file changed, 34 insertions(+), 3 deletions(-)
```diff
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 1acf30ae566b..d0d93847c785 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -34,8 +34,9 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	struct list_head *next;
@@ -44,6 +45,9 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 
 	rwsemtrace(sem,"Entering __rwsem_do_wake");
 
+	if (!wakewrite)
+		goto dont_wake_writers;
+
 	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
  try_again:
 	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
@@ -64,6 +68,12 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 	wake_up_process(waiter->task);
 	goto out;
 
+	/* don't want to wake any writers */
+ dont_wake_writers:
+	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+		goto out;
+
 	/* grant an infinite number of read locks to the readers at the front of the queue
 	 * - note we increment the 'active part' of the count by the number of readers (less one
 	 *   for the activity decrement we've already done) before waking any processes up
@@ -132,7 +142,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
 	 * - it might even be this process, since the waker takes a more active part */
 	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -193,7 +203,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem,1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -202,6 +212,27 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 	return sem;
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented waiting part of count, and discovered it to be still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+
+	spin_lock(&sem->wait_lock);
+
+	/* do nothing if list empty */
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem,0);
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+	return sem;
+}
+
 EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_wake);
```
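For context, rwsem_downgrade_wake() is only reached from a fast path in the per-architecture headers, which this lib-only diff does not show. Here is a minimal sketch of how that fast path fits the comment above ("caller incremented waiting part of count, and discovered it to be still negative"), assuming the i386-style count constants (RWSEM_WAITING_BIAS, RWSEM_ACTIVE_BIAS) and a rwsem_atomic_update() that returns the new count, as lib/rwsem.c itself relies on:

```c
/* Hypothetical fast path; the real one is per-arch, often inline asm. */
static inline void downgrade_write_sketch(struct rw_semaphore *sem)
{
	/*
	 * A held write lock contributes RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS
	 * to sem->count; adding -RWSEM_WAITING_BIAS atomically turns that
	 * into a single reader's RWSEM_ACTIVE_BIAS.
	 */
	if (rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem) < 0)
		/* count still negative => sleepers queued: wake readers only */
		rwsem_downgrade_wake(sem);
}
```

This is also why rwsem_downgrade_wake() passes wakewrite=0 to __rwsem_do_wake(): the downgrading task still holds the lock as a reader, so a queued writer must keep sleeping, while queued readers can be granted the lock alongside it.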
