author	Andrew Morton <akpm@digeo.com>	2003-01-10 18:40:13 -0800
committer	Richard Henderson <rth@are.twiddle.net>	2003-01-10 18:40:13 -0800
commit	2faf433864abcc7337ff731ea04d5a6fc98d8247 (patch)
tree	4b3da1243c69ddcfe5866303f5adcfd5fa2fea7d /include
parent	b4adddd61936b6500ad089fb740c1164b23e27a1 (diff)
[PATCH] Fix an SMP+preempt latency problem
Here is spin_lock():

	#define spin_lock(lock) \
	do { \
		preempt_disable(); \
		_raw_spin_lock(lock); \
	} while(0)

Here is the scenario:

	CPU0:
		spin_lock(some_lock);
		do_very_long_thing();	/* This has cond_resched()s in it */

	CPU1:
		spin_lock(some_lock);

Now suppose that the scheduler tries to schedule a task on CPU1.  Nothing
happens, because CPU1 is spinning on the lock with preemption disabled.  CPU0
will happily hold the lock for a long time because nobody has set
need_resched() against CPU0.

This problem can cause scheduling latencies of many tens of milliseconds on
SMP, on kernels which handle UP quite happily.

This patch fixes the problem by changing the spin_lock() and write_lock()
contended slowpath to spin on the lock by hand, while polling for preemption
requests.  I would have done read_lock() too, but we don't seem to have
read_trylock() primitives.

The patch also shrinks the kernel by 30k due to not having separate
out-of-line spinning code for each spin_lock() callsite.
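Note that the spinlock.h part of the patch shown below only declares
__preempt_spin_lock() and __preempt_write_lock(); the out-of-line slowpath
bodies live outside this header.  As a rough, illustrative sketch only (not
the code from this patch), and assuming the usual helpers of that era
(preempt_count(), spin_is_locked(), cpu_relax()), the contended path could
look like this:

	/*
	 * Illustrative sketch of the contended slowpath.  Entered with
	 * preemption disabled, after _raw_spin_trylock() already failed.
	 */
	void __preempt_spin_lock(spinlock_t *lock)
	{
		/*
		 * If an outer section already disabled preemption we must
		 * not re-enable it here; spin the old-fashioned way.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(lock);
			return;
		}

		do {
			preempt_enable();	/* allow this CPU to reschedule */
			while (spin_is_locked(lock))
				cpu_relax();	/* poll without bouncing the cacheline */
			preempt_disable();
		} while (!_raw_spin_trylock(lock));	/* another CPU may have won the race */
	}

The uncontended fast path in the new spin_lock() macro stays a single
_raw_spin_trylock(); only the contended case pays for the preemptible
polling loop.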
Diffstat (limited to 'include')
-rw-r--r--	include/linux/spinlock.h	56
1 files changed, 39 insertions, 17 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0fe9a764520c..403033961628 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -85,31 +85,37 @@
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define spin_lock(lock) \
-do { \
- preempt_disable(); \
- _raw_spin_lock(lock); \
-} while(0)
-
#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
1 : ({preempt_enable(); 0;});})
-#define spin_unlock(lock) \
+#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
+ 1 : ({preempt_enable(); 0;});})
+
+/* Where's read_trylock? */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+void __preempt_spin_lock(spinlock_t *lock);
+void __preempt_write_lock(rwlock_t *lock);
+
+#define spin_lock(lock) \
do { \
- _raw_spin_unlock(lock); \
- preempt_enable(); \
+ preempt_disable(); \
+ if (unlikely(!_raw_spin_trylock(lock))) \
+ __preempt_spin_lock(lock); \
} while (0)
-#define read_lock(lock) \
+#define write_lock(lock) \
do { \
preempt_disable(); \
- _raw_read_lock(lock); \
-} while(0)
+ if (unlikely(!_raw_write_trylock(lock))) \
+ __preempt_write_lock(lock); \
+} while (0)
-#define read_unlock(lock) \
+#else
+#define spin_lock(lock) \
do { \
- _raw_read_unlock(lock); \
- preempt_enable(); \
+ preempt_disable(); \
+ _raw_spin_lock(lock); \
} while(0)
#define write_lock(lock) \
@@ -117,6 +123,19 @@ do { \
preempt_disable(); \
_raw_write_lock(lock); \
} while(0)
+#endif
+
+#define read_lock(lock) \
+do { \
+ preempt_disable(); \
+ _raw_read_lock(lock); \
+} while(0)
+
+#define spin_unlock(lock) \
+do { \
+ _raw_spin_unlock(lock); \
+ preempt_enable(); \
+} while (0)
#define write_unlock(lock) \
do { \
@@ -124,8 +143,11 @@ do { \
preempt_enable(); \
} while(0)
-#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
- 1 : ({preempt_enable(); 0;});})
+#define read_unlock(lock) \
+do { \
+ _raw_read_unlock(lock); \
+ preempt_enable(); \
+} while(0)
#define spin_lock_irqsave(lock, flags) \
do { \