author	Andrew Morton <akpm@digeo.com>	2003-02-02 06:07:45 -0800
committer	Linus Torvalds <torvalds@home.transmeta.com>	2003-02-02 06:07:45 -0800
commit	157697ebe3717ac92da440e40faa8f2dc82c2aac (patch)
tree	fb25cd0d962506f2d6eb8fe14ae2fa6e000927c8 /kernel
parent	df38988ca7f60177ffa147e009e7d80bdeb9ad48 (diff)
[PATCH] preempt spinlock efficiency fix
Patch from: jak@rudolph.ccur.com (Joe Korty)

The new, preemptable spin_lock() spins on an atomic bus-locking read/write instead of the ordinary read that the original spin_lock() implementation used. Perhaps that is the source of the inefficiency being seen. The attached sample code compiles but is untested and incomplete (present only to illustrate the idea).
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	29
1 file changed, 12 insertions(+), 17 deletions(-)
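The inefficiency described above comes from spinning directly on the trylock: each _raw_spin_trylock() is a locked read-modify-write that bounces the lock's cache line between CPUs. The patch switches to a test-and-test-and-set style loop that waits with plain reads and only issues the atomic operation once the lock looks free. A minimal userspace sketch of the two styles, using C11 atomics and made-up names (naive_lock, ttas_lock) purely for illustration, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

/* Naive spin: every retry is an atomic bus-locking operation. */
static void naive_lock(atomic_bool *lock)
{
	while (atomic_exchange_explicit(lock, true, memory_order_acquire))
		;	/* each iteration demands exclusive ownership of the cache line */
}

/* Test-and-test-and-set: wait with plain reads, then do one atomic op.
 * This mirrors the patch, which spins on spin_is_locked()/rwlock_is_locked()
 * before retrying _raw_spin_trylock()/_raw_write_trylock(). */
static void ttas_lock(atomic_bool *lock)
{
	do {
		while (atomic_load_explicit(lock, memory_order_relaxed))
			;	/* ordinary read: served from the local cache while the lock is held */
	} while (atomic_exchange_explicit(lock, true, memory_order_acquire));
}

static void ttas_unlock(atomic_bool *lock)
{
	atomic_store_explicit(lock, false, memory_order_release);
}

The exchange is attempted, and the cache line dirtied, only after the lock has been observed free, so waiters no longer hammer the bus while the owner holds it.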
diff --git a/kernel/sched.c b/kernel/sched.c
index c7fc43e34f43..973799334ebe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2465,15 +2465,12 @@ void __preempt_spin_lock(spinlock_t *lock)
 		_raw_spin_lock(lock);
 		return;
 	}
-
-	while (!_raw_spin_trylock(lock)) {
-		if (need_resched()) {
-			preempt_enable_no_resched();
-			__cond_resched();
-			preempt_disable();
-		}
-		cpu_relax();
-	}
+	do {
+		preempt_enable();
+		while (spin_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_raw_spin_trylock(lock));
 }
 
 void __preempt_write_lock(rwlock_t *lock)
@@ -2483,13 +2480,11 @@ void __preempt_write_lock(rwlock_t *lock)
 		return;
 	}
-	while (!_raw_write_trylock(lock)) {
-		if (need_resched()) {
-			preempt_enable_no_resched();
-			__cond_resched();
-			preempt_disable();
-		}
-		cpu_relax();
-	}
+	do {
+		preempt_enable();
+		while (rwlock_is_locked(lock))
+			cpu_relax();
+		preempt_disable();
+	} while (!_raw_write_trylock(lock));
 }
 #endif
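For reference, the shape of the new kernel loops as a generic pattern: leave preemption enabled while waiting so a higher-priority task can run, wait with cheap reads, then disable preemption and attempt the real trylock. A rough userspace analogue, where sched_yield() stands in for "preemption stays enabled here" and try_acquire()/preemptible_acquire() are invented names for this sketch only:

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static bool try_acquire(atomic_bool *lock)
{
	/* One atomic bus-locking operation per acquisition attempt. */
	return !atomic_exchange_explicit(lock, true, memory_order_acquire);
}

static void preemptible_acquire(atomic_bool *lock)
{
	do {
		/* Wait with plain reads and let other tasks run, much as the
		 * kernel loop keeps preemption enabled around the inner wait. */
		while (atomic_load_explicit(lock, memory_order_relaxed))
			sched_yield();
	} while (!try_acquire(lock));	/* another waiter may have won; loop if so */
}

As in the patch, the atomic operation is issued at most once per observed release of the lock rather than once per spin iteration.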