author     Linus Torvalds <torvalds@home.transmeta.com>  2002-07-28 02:28:50 -0700
committer  Linus Torvalds <torvalds@home.transmeta.com>  2002-07-28 02:28:50 -0700
commit     61887e47ae271f1bd894d892e16e2d94bc5ad6ef (patch)
tree       ad5ebbfa5c2222e1ccc3d424f271de187626ef32
parent     47fff65a4661723b044cb5e5e897eeafa3ccfece (diff)
parent     39520ba43578bf731a5a3b91a36c64f992e87d20 (diff)

Merge bk://bkbits.ras.ucalgary.ca/rgooch-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
-rw-r--r--  arch/i386/kernel/process.c      |  16
-rw-r--r--  include/asm-i386/rwsem.h        |  36
-rw-r--r--  include/linux/rwsem-spinlock.h  |   3
-rw-r--r--  include/linux/rwsem.h           |  25
-rw-r--r--  lib/rwsem-spinlock.c            |  89
5 files changed, 154 insertions(+), 15 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 43673f006c10..f214606d8c80 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -675,6 +675,14 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
tss->esp0 = next->esp0;
/*
+ * Load the per-thread Thread-Local Storage descriptor.
+ *
+ * NOTE: it's faster to do the two stores unconditionally
+ * than to branch away.
+ */
+ load_TLS_desc(next, cpu);
+
+ /*
* Save away %fs and %gs. No need to save %es and %ds, as
* those are always kernel segments while inside the kernel.
*/
@@ -690,14 +698,6 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/*
- * Load the per-thread Thread-Local Storage descriptor.
- *
- * NOTE: it's faster to do the two stores unconditionally
- * than to branch away.
- */
- load_TLS_desc(next, cpu);
-
- /*
* Now maybe reload the debug registers
*/
if (unlikely(next->debugreg[7])) {
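The hunk pair above is a pure move: load_TLS_desc() now runs before the %fs/%gs
handling in __switch_to() rather than after it. A minimal ordering sketch of the
post-patch switch path (illustrative pseudocode, not the kernel source;
savesegment()/loadsegment() stand in for the mov-from/to-segment asm in the
real file):

	/* Sketch of the ordering __switch_to() has after this patch: */
	tss->esp0 = next->esp0;        /* kernel stack for privilege transitions */

	load_TLS_desc(next, cpu);      /* install next's TLS entries in the GDT
	                                * first (two unconditional stores, which
	                                * per the comment beats branching) ...   */

	savesegment(fs, prev->fs);     /* ... then save prev's %fs/%gs, so that  */
	savesegment(gs, prev->gs);     /* any later reload of a segment register */
	loadsegment(fs, next->fs);     /* holding a TLS selector already sees    */
	loadsegment(gs, next->gs);     /* next's descriptors rather than prev's  */
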
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 72f2ae078a36..7d3a3f087ed3 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -118,6 +118,29 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ __s32 result, tmp;
+ __asm__ __volatile__(
+ "# beginning __down_read_trylock\n\t"
+ " movl %0,%1\n\t"
+ "1:\n\t"
+ " movl %1,%2\n\t"
+ " addl %3,%2\n\t"
+ " jle 2f\n\t"
+LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
+ : "i"(RWSEM_ACTIVE_READ_BIAS)
+ : "memory", "cc");
+ return result>=0 ? 1 : 0;
+}
+
+/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
@@ -145,6 +168,19 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ signed long ret = cmpxchg(&sem->count,
+ RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ if (ret == RWSEM_UNLOCKED_VALUE)
+ return 1;
+ return 0;
+}
+
+/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
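Both new trylocks reduce to one compare-and-swap pattern: compute what the
count would become with the caller's bias applied, and commit only if nobody
raced in between. A C11 sketch of the same logic, placed here purely for
readability next to the inline asm (the standalone form, the _Atomic counter,
and the sketch_ names are assumptions; the bias values mirror the i386
definitions):

	#include <stdatomic.h>

	#define RWSEM_UNLOCKED_VALUE     0x00000000
	#define RWSEM_ACTIVE_READ_BIAS   0x00000001
	#define RWSEM_ACTIVE_WRITE_BIAS  (-0x00010000 + 1)

	/* Reader trylock: succeed only while the biased count stays positive,
	 * i.e. no writer is active or waiting; mirrors the cmpxchg loop above. */
	static int sketch_down_read_trylock(_Atomic int *count)
	{
		int old = atomic_load(count);
		while (old + RWSEM_ACTIVE_READ_BIAS > 0) {
			if (atomic_compare_exchange_weak(count, &old,
							 old + RWSEM_ACTIVE_READ_BIAS))
				return 1;	/* we installed the new count */
			/* old was refreshed by the failed CAS; retry */
		}
		return 0;
	}

	/* Writer trylock: a single CAS from "completely unlocked" to the
	 * write-locked value; any other observed state means contention. */
	static int sketch_down_write_trylock(_Atomic int *count)
	{
		int expected = RWSEM_UNLOCKED_VALUE;
		return atomic_compare_exchange_strong(count, &expected,
						      RWSEM_ACTIVE_WRITE_BIAS);
	}
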
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 3087c5c101f4..f4ac435bf141 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -54,9 +54,12 @@ struct rw_semaphore {
extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
extern void FASTCALL(__down_read(struct rw_semaphore *sem));
+extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__up_read(struct rw_semaphore *sem));
extern void FASTCALL(__up_write(struct rw_semaphore *sem));
+extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 320138d6643d..4a7e2bb0d7c4 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -46,6 +46,18 @@ static inline void down_read(struct rw_semaphore *sem)
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+ int ret;
+ rwsemtrace(sem,"Entering down_read_trylock");
+ ret = __down_read_trylock(sem);
+ rwsemtrace(sem,"Leaving down_read_trylock");
+ return ret;
+}
+
+/*
* lock for writing
*/
static inline void down_write(struct rw_semaphore *sem)
@@ -56,6 +68,18 @@ static inline void down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+ int ret;
+ rwsemtrace(sem,"Entering down_write_trylock");
+ ret = __down_write_trylock(sem);
+ rwsemtrace(sem,"Leaving down_write_trylock");
+ return ret;
+}
+
+/*
* release a read lock
*/
static inline void up_read(struct rw_semaphore *sem)
@@ -85,6 +109,5 @@ static inline void downgrade_write(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving downgrade_write");
}
-
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
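These wrappers only add rwsemtrace() calls around the arch-specific hooks; the
return convention (1 on success, 0 on contention) makes them natural inside an
if (). A hypothetical caller, not part of this patch (struct my_obj and
try_update() are made up for illustration):

	#include <linux/rwsem.h>

	struct my_obj {
		struct rw_semaphore sem;
		int value;
	};

	/* Opportunistic writer: do the update only if the lock is free right
	 * now, reporting contention instead of sleeping in down_write(). */
	static int try_update(struct my_obj *obj, int v)
	{
		if (!down_write_trylock(&obj->sem))
			return 0;		/* contended, caller may retry */
		obj->value = v;			/* mutate under the exclusive lock */
		up_write(&obj->sem);
		return 1;
	}
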
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index a17abe4e7840..0a38eebfae79 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -46,8 +46,9 @@ void init_rwsem(struct rw_semaphore *sem)
* - the 'waiting count' is non-zero
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
int woken;
@@ -56,7 +57,14 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
- /* try to grant a single write lock if there's a writer at the front of the queue
+ if (!wakewrite) {
+ if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+ goto out;
+ goto dont_wake_writers;
+ }
+
+ /* if we are allowed to wake writers try to grant a single write lock if there's a
+ * writer at the front of the queue
* - we leave the 'waiting count' incremented to signify potential contention
*/
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
@@ -68,16 +76,19 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
}
/* grant an infinite number of read locks to the readers at the front of the queue */
+ dont_wake_writers:
woken = 0;
- do {
+ while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+ struct list_head *next = waiter->list.next;
+
list_del(&waiter->list);
waiter->flags = 0;
wake_up_process(waiter->task);
woken++;
if (list_empty(&sem->wait_list))
break;
- waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
- } while (waiter->flags&RWSEM_WAITING_FOR_READ);
+ waiter = list_entry(next,struct rwsem_waiter,list);
+ }
sem->activity += woken;
@@ -149,6 +160,28 @@ void __down_read(struct rw_semaphore *sem)
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int __down_read_trylock(struct rw_semaphore *sem)
+{
+ int ret = 0;
+ rwsemtrace(sem,"Entering __down_read_trylock");
+
+ spin_lock(&sem->wait_lock);
+
+ if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity++;
+ ret = 1;
+ }
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __down_read_trylock");
+ return ret;
+}
+
+/*
* get a write lock on the semaphore
* - note that we increment the waiting count anyway to indicate an exclusive lock
*/
@@ -195,6 +228,28 @@ void __down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int __down_write_trylock(struct rw_semaphore *sem)
+{
+ int ret = 0;
+ rwsemtrace(sem,"Entering __down_write_trylock");
+
+ spin_lock(&sem->wait_lock);
+
+ if (sem->activity==0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity = -1;
+ ret = 1;
+ }
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __down_write_trylock");
+ return ret;
+}
+
+/*
* release a read lock on the semaphore
*/
void __up_read(struct rw_semaphore *sem)
@@ -222,18 +277,40 @@ void __up_write(struct rw_semaphore *sem)
sem->activity = 0;
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem);
+ sem = __rwsem_do_wake(sem, 1);
spin_unlock(&sem->wait_lock);
rwsemtrace(sem,"Leaving __up_write");
}
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void __downgrade_write(struct rw_semaphore *sem)
+{
+ rwsemtrace(sem,"Entering __rwsem_downgrade");
+
+ spin_lock(&sem->wait_lock);
+
+ sem->activity = 1;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem,0);
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __rwsem_downgrade");
+}
+
EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
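
Taken together, the spinlock-variant additions give this implementation the
same downgrade semantics as the x86 one: __downgrade_write() sets activity to
a single reader and calls __rwsem_do_wake() with wakewrite == 0, so readers at
the front of the queue are released while any writer there keeps sleeping.
Typical use from a lock holder (illustrative sequence, not taken from this
patch):

	static DECLARE_RWSEM(sem);	/* hypothetical, statically initialised */

	void publish_update(void)
	{
		down_write(&sem);	/* exclusive while mutating the data */
		/* ... write the shared state ... */
		downgrade_write(&sem);	/* become a reader with no window in
					 * which another writer could run; only
					 * queued readers are woken, since
					 * __rwsem_do_wake gets wakewrite == 0 */
		/* ... keep reading the state just written ... */
		up_read(&sem);
	}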