summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2002-05-02 13:01:43 +1000
committerAnton Blanchard <anton@samba.org>2002-05-02 13:01:43 +1000
commitde8aec0426f98611dff9d3d557785ac2873653e2 (patch)
tree1602867519d82cd45941ad24fe1bc53712f062f7 /include
parente8812777bc1b1f24890e24519000876f888543ec (diff)
ppc64: Only implement thread priority macros on HMT or iSeries kernels
Drop back to eieio in spinlocks for the moment due to performance issues of sync on power3
Diffstat (limited to 'include')
-rw-r--r--include/asm-ppc64/memory.h20
-rw-r--r--include/asm-ppc64/processor.h5
-rw-r--r--include/asm-ppc64/spinlock.h39
3 files changed, 41 insertions, 23 deletions
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
index 25d0fb6f6a90..60ece9cc43b8 100644
--- a/include/asm-ppc64/memory.h
+++ b/include/asm-ppc64/memory.h
@@ -41,4 +41,24 @@ static inline void isync(void)
#define isync_on_smp() __asm__ __volatile__("": : :"memory")
#endif
+/* Macros for adjusting thread priority (hardware multi-threading) */
+
+#if defined(CONFIG_PPC_ISERIES) || defined(CONFIG_HMT)
+#define HMT_low() asm volatile("or 1,1,1 # low priority")
+#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
+#define HMT_high() asm volatile("or 3,3,3 # high priority")
+
+#define HMT_LOW "\tor 1,1,1 # low priority\n"
+#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
+#define HMT_HIGH "\tor 3,3,3 # high priority\n"
+#else
+#define HMT_low() do { } while(0)
+#define HMT_medium() do { } while(0)
+#define HMT_high() do { } while(0)
+
+#define HMT_LOW
+#define HMT_MEDIUM
+#define HMT_HIGH
+#endif
+
#endif
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index cdcee424cc79..0c1ce4752259 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -589,11 +589,6 @@ GLUE(GLUE(.LT,NAME),_procname_end):
#define CTRLF 0x088
#define RUNLATCH 0x0001
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_low() asm volatile("or 1,1,1")
-#define HMT_medium() asm volatile("or 2,2,2")
-#define HMT_high() asm volatile("or 3,3,3")
-
/* Size of an exception stack frame contained in the paca. */
#define EXC_FRAME_SIZE 64
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index 1f9827870db0..4043e6585773 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -49,12 +49,13 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
__asm__ __volatile__(
"b 2f # spin_lock\n\
-1: or 1,1,1 # spin at low priority\n\
- lwzx %0,0,%1\n\
+1:"
+ HMT_LOW
+" lwzx %0,0,%1\n\
cmpwi 0,%0,0\n\
- bne+ 1b\n\
- or 2,2,2 # back to medium priority\n\
-2: lwarx %0,0,%1\n\
+ bne+ 1b\n"
+ HMT_MEDIUM
+"2: lwarx %0,0,%1\n\
cmpwi 0,%0,0\n\
bne- 1b\n\
stwcx. %2,0,%1\n\
@@ -67,7 +68,7 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
- __asm__ __volatile__("lwsync # spin_unlock": : :"memory");
+ __asm__ __volatile__("eieio # spin_unlock": : :"memory");
lock->lock = 0;
}
@@ -115,12 +116,13 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
__asm__ __volatile__(
"b 2f # read_lock\n\
-1: or 1,1,1 # spin at low priority\n\
- lwax %0,0,%1\n\
+1:"
+ HMT_LOW
+" lwax %0,0,%1\n\
cmpwi 0,%0,0\n\
- blt+ 1b\n\
- or 2,2,2 # back to medium priority\n\
-2: lwarx %0,0,%1\n\
+ blt+ 1b\n"
+ HMT_MEDIUM
+"2: lwarx %0,0,%1\n\
extsw %0,%0\n\
addic. %0,%0,1\n\
ble- 1b\n\
@@ -137,7 +139,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
unsigned int tmp;
__asm__ __volatile__(
- "lwsync # read_unlock\n\
+ "eieio # read_unlock\n\
1: lwarx %0,0,%1\n\
addic %0,%0,-1\n\
stwcx. %0,0,%1\n\
@@ -174,12 +176,13 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
__asm__ __volatile__(
"b 2f # write_lock\n\
-1: or 1,1,1 # spin at low priority\n\
- lwax %0,0,%1\n\
+1:"
+ HMT_LOW
+ "lwax %0,0,%1\n\
cmpwi 0,%0,0\n\
- bne+ 1b\n\
- or 2,2,2 # back to medium priority\n\
-2: lwarx %0,0,%1\n\
+ bne+ 1b\n"
+ HMT_MEDIUM
+"2: lwarx %0,0,%1\n\
cmpwi 0,%0,0\n\
bne- 1b\n\
stwcx. %2,0,%1\n\
@@ -192,7 +195,7 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
- __asm__ __volatile__("lwsync # write_unlock": : :"memory");
+ __asm__ __volatile__("eieio # write_unlock": : :"memory");
rw->lock = 0;
}