summary refs log tree commit diff
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2003-08-29 10:23:42 +1000
committerAnton Blanchard <anton@samba.org>2003-08-29 10:23:42 +1000
commite5122ad463da0d880e104341c7828b43526d9624 (patch)
treedade213b2345d65e9c1ca751f3eee58f58330610
parent64553593bc1bb5803f121c2cedece686f5804730 (diff)
ppc64: semaphore fixes based on report by ever watchful Olaf Hering
-rw-r--r--arch/ppc64/kernel/semaphore.c15
-rw-r--r--include/asm-ppc64/rwsem.h22
-rw-r--r--include/asm-ppc64/semaphore.h15
3 files changed, 17 insertions, 35 deletions
diff --git a/arch/ppc64/kernel/semaphore.c b/arch/ppc64/kernel/semaphore.c
index 7fb2d8d95d0a..c977029e2465 100644
--- a/arch/ppc64/kernel/semaphore.c
+++ b/arch/ppc64/kernel/semaphore.c
@@ -75,9 +75,8 @@ void __down(struct semaphore *sem)
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_UNINTERRUPTIBLE;
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
- smp_wmb();
/*
* Try to get the semaphore. If the count is > 0, then we've
@@ -87,10 +86,10 @@ void __down(struct semaphore *sem)
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
- tsk->state = TASK_UNINTERRUPTIBLE;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
+ __set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
@@ -106,9 +105,8 @@ int __down_interruptible(struct semaphore * sem)
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_INTERRUPTIBLE;
+ __set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
- smp_wmb();
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
@@ -122,10 +120,11 @@ int __down_interruptible(struct semaphore * sem)
break;
}
schedule();
- tsk->state = TASK_INTERRUPTIBLE;
+ set_task_state(tsk, TASK_INTERRUPTIBLE);
}
- tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
+ __set_task_state(tsk, TASK_RUNNING);
+
wake_up(&sem->wait);
return retval;
}
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-ppc64/rwsem.h
index affeaf32495d..bd5c2f093575 100644
--- a/include/asm-ppc64/rwsem.h
+++ b/include/asm-ppc64/rwsem.h
@@ -1,5 +1,5 @@
/*
- * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
* in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*
@@ -74,9 +74,7 @@ static inline void init_rwsem(struct rw_semaphore *sem)
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
- smp_wmb();
- else
+ if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
rwsem_down_read_failed(sem);
}
@@ -87,7 +85,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
- smp_wmb();
return 1;
}
}
@@ -103,9 +100,7 @@ static inline void __down_write(struct rw_semaphore *sem)
tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count));
- if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
- smp_wmb();
- else
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
}
@@ -115,7 +110,6 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
- smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
@@ -126,9 +120,8 @@ static inline void __up_read(struct rw_semaphore *sem)
{
int tmp;
- smp_wmb();
tmp = atomic_dec_return((atomic_t *)(&sem->count));
- if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
@@ -137,9 +130,8 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- smp_wmb();
- if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_t *)(&sem->count)) < 0)
+ if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0))
rwsem_wake(sem);
}
@@ -158,7 +150,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
{
int tmp;
- smp_wmb();
tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
if (tmp < 0)
rwsem_downgrade_wake(sem);
@@ -169,7 +160,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
*/
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
- smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
diff --git a/include/asm-ppc64/semaphore.h b/include/asm-ppc64/semaphore.h
index 31c6636a1100..22dfee71d8ba 100644
--- a/include/asm-ppc64/semaphore.h
+++ b/include/asm-ppc64/semaphore.h
@@ -82,9 +82,8 @@ static inline void down(struct semaphore * sem)
/*
* Try to get the semaphore, take the slow path if we fail.
*/
- if (atomic_dec_return(&sem->count) < 0)
+ if (unlikely(atomic_dec_return(&sem->count) < 0))
__down(sem);
- smp_wmb();
}
static inline int down_interruptible(struct semaphore * sem)
@@ -96,23 +95,18 @@ static inline int down_interruptible(struct semaphore * sem)
#endif
might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
+ if (unlikely(atomic_dec_return(&sem->count) < 0))
ret = __down_interruptible(sem);
- smp_wmb();
return ret;
}
static inline int down_trylock(struct semaphore * sem)
{
- int ret;
-
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
- ret = atomic_dec_if_positive(&sem->count) < 0;
- smp_wmb();
- return ret;
+ return atomic_dec_if_positive(&sem->count) < 0;
}
static inline void up(struct semaphore * sem)
@@ -121,8 +115,7 @@ static inline void up(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
- smp_wmb();
- if (atomic_inc_return(&sem->count) <= 0)
+ if (unlikely(atomic_inc_return(&sem->count) <= 0))
__up(sem);
}