| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-10-25 01:21:01 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-10-25 01:21:01 -0700 |
| commit | 59a7718e80cd84ffc338cda7e5531d530e3d0749 | |
| tree | fdb8297060313d509359f72f8593adc21b661358 /include/linux | |
| parent | 6f60f5cfdd2a51b82479d8285ee2344e74ccb098 | |
Allow BKL re-acquire to fail, causing us to re-schedule.
This allows for low-latency BKL contention even with
preemption. Previously, since preemption is disabled
across the context switch, re-acquiring the kernel lock when
resuming a process was non-preemptible.
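
The scheduler side of this change lives in kernel/sched.c and is not part of this diff. As a minimal sketch of how the new return value is meant to be consumed (the retry label and the negative-on-contention convention are assumptions for illustration, not the verbatim sched.c hunk):

```c
/*
 * Illustrative sketch only, not the actual kernel/sched.c change.
 * After switching back into a task that held the BKL, try to
 * re-take the lock. On contention, get_kernel_lock() can now
 * report failure instead of spinning with preemption disabled,
 * and the scheduler simply runs another pass.
 */
if (unlikely(reacquire_kernel_lock(current) < 0))
	goto need_resched;	/* hypothetical label at the top of schedule() */
```

The point is that waiting for a contended BKL now happens through the scheduler, where normal preemption rules apply, rather than in a non-preemptible spin.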
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/smp_lock.h | 24 |
1 file changed, 19 insertions, 5 deletions
```diff
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 99ea247059c1..17ff18b82cd9 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -9,7 +9,7 @@
 
 #define kernel_locked()		(current->lock_depth >= 0)
 
-extern void __lockfunc get_kernel_lock(void);
+extern int __lockfunc get_kernel_lock(void);
 extern void __lockfunc put_kernel_lock(void);
 
 /*
@@ -20,10 +20,24 @@ extern void __lockfunc put_kernel_lock(void);
 		put_kernel_lock();			\
 } while (0)
 
-#define reacquire_kernel_lock(tsk) do {			\
-	if (unlikely((tsk)->lock_depth >= 0))		\
-		get_kernel_lock();			\
-} while (0)
+/*
+ * Non-SMP kernels will never block on the kernel lock,
+ * so we are better off returning a constant zero from
+ * reacquire_kernel_lock() so that the compiler can see
+ * it at compile-time.
+ */
+#ifdef CONFIG_SMP
+#define return_value_on_smp return
+#else
+#define return_value_on_smp
+#endif
+
+static inline int reacquire_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(task->lock_depth >= 0))
+		return_value_on_smp get_kernel_lock();
+	return 0;
+}
 
 extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
 extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
```
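
The return_value_on_smp macro lets a single function body serve both configurations. Expanding reacquire_kernel_lock() by hand (shown purely for illustration) makes the trick visible:

```c
/* With CONFIG_SMP, return_value_on_smp expands to "return",
 * so a contended re-acquire propagates get_kernel_lock()'s
 * status straight to the caller: */
if (unlikely(task->lock_depth >= 0))
	return get_kernel_lock();
return 0;

/* Without CONFIG_SMP, the macro expands to nothing, so
 * get_kernel_lock() is called only for its side effect and
 * control falls through to the constant "return 0". The caller's
 * failure branch then becomes dead code the compiler can drop,
 * which is exactly what the comment in the patch is after. */
if (unlikely(task->lock_depth >= 0))
	get_kernel_lock();
return 0;
```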
