From 59a7718e80cd84ffc338cda7e5531d530e3d0749 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 25 Oct 2004 01:21:01 -0700
Subject: Allow BKL re-acquire to fail, causing us to re-schedule.

This allows for low-latency BKL contention even with preemption.
Previously, since preemption is disabled over context switches,
re-acquiring the kernel lock when resuming a process would be
non-preemptible.
---
 include/linux/smp_lock.h | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 99ea247059c1..17ff18b82cd9 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -9,7 +9,7 @@
 
 #define kernel_locked()		(current->lock_depth >= 0)
 
-extern void __lockfunc get_kernel_lock(void);
+extern int __lockfunc get_kernel_lock(void);
 extern void __lockfunc put_kernel_lock(void);
 
 /*
@@ -20,10 +20,24 @@ extern void __lockfunc put_kernel_lock(void);
 		put_kernel_lock();		\
 } while (0)
 
-#define reacquire_kernel_lock(tsk) do {	\
-	if (unlikely((tsk)->lock_depth >= 0))	\
-		get_kernel_lock();		\
-} while (0)
+/*
+ * Non-SMP kernels will never block on the kernel lock,
+ * so we are better off returning a constant zero from
+ * reacquire_kernel_lock() so that the compiler can see
+ * it at compile-time.
+ */
+#ifdef CONFIG_SMP
+#define return_value_on_smp return
+#else
+#define return_value_on_smp
+#endif
+
+static inline int reacquire_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(task->lock_depth >= 0))
+		return_value_on_smp get_kernel_lock();
+	return 0;
+}
 
 extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
 extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);
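
For illustration only, a hedged sketch of how a caller could consume the new
int return value.  This is not part of the commit: the real consumer is the
scheduler's resume path (not shown in this hunk, which is limited to
include/linux), and the negative-return-on-contention convention and the
example_resume() name are assumptions made purely for the example.

/*
 * Illustrative sketch only -- not from this diff.  Assumes that
 * get_kernel_lock() reports contention by returning a negative value,
 * so the caller can back off and go around its scheduling loop again
 * instead of spinning with preemption disabled.
 */
static void example_resume(struct task_struct *prev)
{
again:
	/* ... pick the next task and context-switch to it ... */

	/*
	 * Previously the BKL was unconditionally re-taken here, which
	 * could spin non-preemptibly.  Now the acquire may fail, and we
	 * simply retry the scheduling loop.
	 */
	if (unlikely(reacquire_kernel_lock(prev) < 0))
		goto again;
}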