author     Linus Torvalds <torvalds@ppc970.osdl.org>    2004-10-24 02:24:27 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2004-10-24 02:24:27 -0700
commit     389fb07b0a3ed522318b22e12054d0212990f5c6 (patch)
tree       6dd9a109255ae17a7142345d4a7579b80da608ea    /lib/kernel_lock.c
parent     6ae629404093b6d8c9c085d0677840018904d026 (diff)
Un-inline the big kernel lock.
Now that spinlocks are uninlined, it is silly to keep the BKL inlined. And this should make it a lot easier for people to play around with variations on the locking (ie Ingo's semaphores etc).
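For readers unfamiliar with the terminology, here is a minimal sketch of what "un-inlining" amounts to. The declarations below are a simplified illustration, not the actual contents of include/linux/smp_lock.h: the BKL entry points are now only declared in the header and defined once, out of line, in lib/kernel_lock.c, so the locking strategy behind them can be changed (for example to a semaphore-based BKL) without recompiling every caller.

/* Hypothetical, simplified header excerpt -- illustration only.
 * The entry points are merely declared here ... */
extern void __lockfunc lock_kernel(void);
extern void __lockfunc unlock_kernel(void);

/* ... and their single out-of-line definitions live in lib/kernel_lock.c,
 * shown in the diff below. */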
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c | 102
1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
new file mode 100644
index 000000000000..59fcbf3de282
--- /dev/null
+++ b/lib/kernel_lock.c
@@ -0,0 +1,102 @@
+/*
+ * lib/kernel_lock.c
+ *
+ * This is the traditional BKL - big kernel lock. Largely
+ * relegated to obsolescence, but used by various less
+ * important (or lazy) subsystems.
+ */
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+
+/*
+ * The 'big kernel lock'
+ *
+ * This spinlock is taken and released recursively by lock_kernel()
+ * and unlock_kernel(). It is transparently dropped and reacquired
+ * over schedule(). It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Don't use in new code.
+ */
+static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+
+
+/*
+ * Acquire/release the underlying lock from the scheduler.
+ *
+ * The scheduler release and re-acquire currently always happen
+ * with preemption disabled. Which is likely a bug in the acquire
+ * case...
+ *
+ * Regardless, we try to be polite about preemption. If SMP is
+ * not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
+ */
+#ifdef CONFIG_PREEMPT
+inline void __lockfunc get_kernel_lock(void)
+{
+ preempt_disable();
+ if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ /*
+ * If preemption was disabled even before this
+ * was called, there's nothing we can be polite
+ * about - just spin.
+ */
+ if (preempt_count() > 1) {
+ _raw_spin_lock(&kernel_flag);
+ return;
+ }
+
+ /*
+ * Otherwise, let's wait for the kernel lock
+ * with preemption enabled..
+ */
+ do {
+ preempt_enable();
+ while (spin_is_locked(&kernel_flag))
+ cpu_relax();
+ preempt_disable();
+ } while (!_raw_spin_trylock(&kernel_flag));
+ }
+}
+
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+inline void __lockfunc get_kernel_lock(void)
+{
+ _raw_spin_lock(&kernel_flag);
+}
+#endif
+
+inline void __lockfunc put_kernel_lock(void)
+{
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable();
+}
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPUs.
+ */
+void __lockfunc lock_kernel(void)
+{
+ int depth = current->lock_depth+1;
+ if (likely(!depth))
+ get_kernel_lock();
+ current->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+ BUG_ON(current->lock_depth < 0);
+ if (likely(--current->lock_depth < 0))
+ put_kernel_lock();
+}
+
+EXPORT_SYMBOL(lock_kernel);
+EXPORT_SYMBOL(unlock_kernel);
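
To make the lock_depth bookkeeping above concrete, here is a hedged usage sketch; the legacy_* callers are invented for illustration and are not part of this commit. Only the outermost lock_kernel() takes the underlying spinlock, while nested acquisitions merely adjust current->lock_depth.

/* Hypothetical legacy-style callers -- illustration only. */
#include <linux/smp_lock.h>

static void legacy_inner(void)
{
	lock_kernel();		/* lock_depth 0 -> 1: spinlock not touched */
	/* ... access BKL-protected legacy state ... */
	unlock_kernel();	/* lock_depth 1 -> 0: BKL still held */
}

static void legacy_outer(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: kernel_flag acquired */
	legacy_inner();		/* recursive acquire is just a counter bump */
	unlock_kernel();	/* lock_depth 0 -> -1: kernel_flag released */
}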
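
The "transparently dropped and reacquired over schedule()" behaviour mentioned in the header comment lives on the scheduler side, not in this file. The sketch below is a rough illustration of that interaction under the lock_depth convention; the example_* helpers and their placement are assumptions, not the real scheduler hooks.

/* Hypothetical scheduler-side drop/reacquire -- illustration only.
 * get_kernel_lock()/put_kernel_lock() are the helpers defined above;
 * their extern declarations are assumed to live in a shared header. */
#include <linux/sched.h>

extern void __lockfunc get_kernel_lock(void);
extern void __lockfunc put_kernel_lock(void);

static void example_release_bkl_before_switch(struct task_struct *prev)
{
	/* If the outgoing task logically owns the BKL, drop the spinlock
	 * but leave lock_depth alone so ownership is remembered. */
	if (unlikely(prev->lock_depth >= 0))
		put_kernel_lock();
}

static void example_reacquire_bkl_after_switch(struct task_struct *next)
{
	/* When a BKL-owning task is scheduled back in, retake the
	 * spinlock before it resumes. */
	if (unlikely(next->lock_depth >= 0))
		get_kernel_lock();
}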