author     Ingo Molnar <mingo@elte.hu>                   2002-07-21 02:11:12 -0700
committer  Linus Torvalds <torvalds@home.transmeta.com>  2002-07-21 02:11:12 -0700
commit     ae86a80aed1e269d435c70f6e85deb80e8f8be98 (patch)
tree       c0c5b816da7b3a3102f159c335745ae9b01883c1 /include/linux
parent     3d37e1e6171f8cbd81e442524d4dd231b8cbf5d1 (diff)
[PATCH] "big IRQ lock" removal, IRQ cleanups
This is a massive cleanup of the IRQ subsystem. It's loosely based on Linus' original idea and DaveM's original implementation: fold our various irq, softirq and bh counters into the preemption counter. With this approach it was possible:

- to remove the 'big IRQ lock' on SMP, on which sti() and cli() relied;
- to streamline/simplify arch/i386/kernel/irq.c significantly;
- to simplify the softirq code;
- to remove the preemption count increase/decrease code from the low-level IRQ assembly code;
- to speed up schedule() a bit.

Global sti() and cli() are gone forever on SMP; there is no longer a globally synchronizing irq-disabling capability. All code that relied on sti(), cli() and restore_flags() must use other locking mechanisms from now on (spinlocks and __cli()/__sti()); a sketch of that conversion follows below.

Obviously this patch breaks massive amounts of code, so only limited .configs work at the moment (UP is expected to be unaffected, but SMP will require various driver updates). The patch was developed and tested on SMP systems, and while the code is still a bit rough in places, the base IRQ code appears to be pretty robust and clean. It already boots, so the worst is over, but lots of work remains: e.g. fixing the serial layer to not use cli()/sti() and bhs.
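For drivers, the visible change is the conversion from the global-lock primitives to real locks. A minimal sketch of that conversion in C follows; the demo_ lock, counter and function names are invented for illustration, and only the locking pattern itself comes from the commit message:

#include <linux/spinlock.h>

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;	/* 2.5-era static initializer */
static int demo_pending;

/* Before: correctness depended on the global IRQ lock behind cli(),
 * which this patch removes on SMP. */
static void demo_update_old(void)
{
	unsigned long flags;

	save_flags(flags);
	cli();				/* disabled interrupts globally on SMP */
	demo_pending++;
	restore_flags(flags);
}

/* After: a spinlock guarding the data, plus local interrupt disabling. */
static void demo_update_new(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* local irqs off, lock held */
	demo_pending++;
	spin_unlock_irqrestore(&demo_lock, flags);
}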
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/irq_cpustat.h   2
-rw-r--r--  include/linux/preempt.h      46
-rw-r--r--  include/linux/smp_lock.h      2
-rw-r--r--  include/linux/spinlock.h     37
4 files changed, 48 insertions, 39 deletions
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
index dfd73c5ec60d..6eab29be1d61 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -29,8 +29,6 @@ extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
/* arch independent irq_stat fields */
#define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending)
-#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
-#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count)
#define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count)
#define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task)
/* arch dependent irq_stat fields */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
new file mode 100644
index 000000000000..172471f0dbde
--- /dev/null
+++ b/include/linux/preempt.h
@@ -0,0 +1,46 @@
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+#include <linux/config.h>
+
+#define preempt_count() (current_thread_info()->preempt_count)
+
+#ifdef CONFIG_PREEMPT
+
+extern void preempt_schedule(void);
+
+#define preempt_disable() \
+do { \
+ preempt_count()++; \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+ preempt_count()--; \
+ barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+ preempt_enable_no_resched(); \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+} while (0)
+
+#else
+
+#define preempt_disable() do { } while (0)
+#define preempt_enable_no_resched() do {} while(0)
+#define preempt_enable() do { } while (0)
+#define preempt_check_resched() do { } while (0)
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
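Taken together, the new header compiles to empty statements without CONFIG_PREEMPT and to counter manipulation plus a reschedule check with it. A minimal usage sketch of the API added above; the per-CPU array and function are invented for illustration:

#include <linux/preempt.h>
#include <linux/smp.h>

static int demo_hits[NR_CPUS];		/* invented per-CPU data */

static void demo_touch_percpu(void)
{
	preempt_disable();		/* pin this task to the current CPU */
	demo_hits[smp_processor_id()]++;
	preempt_enable();		/* may call preempt_schedule() if
					   TIF_NEED_RESCHED was set meanwhile */
}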
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 13d8c7ace0bb..cfb23f363e61 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -7,7 +7,7 @@
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
-#define release_kernel_lock(task, cpu) do { } while(0)
+#define release_kernel_lock(task) do { } while(0)
#define reacquire_kernel_lock(task) do { } while(0)
#define kernel_locked() 1
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 194541968c6a..d9f4af4103e4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -2,6 +2,7 @@
#define __LINUX_SPINLOCK_H
#include <linux/config.h>
+#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
@@ -120,36 +121,6 @@
#ifdef CONFIG_PREEMPT
-asmlinkage void preempt_schedule(void);
-
-#define preempt_get_count() (current_thread_info()->preempt_count)
-
-#define preempt_disable() \
-do { \
- ++current_thread_info()->preempt_count; \
- barrier(); \
-} while (0)
-
-#define preempt_enable_no_resched() \
-do { \
- --current_thread_info()->preempt_count; \
- barrier(); \
-} while (0)
-
-#define preempt_enable() \
-do { \
- --current_thread_info()->preempt_count; \
- barrier(); \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
#define spin_lock(lock) \
do { \
preempt_disable(); \
@@ -179,12 +150,6 @@ do { \
#else
-#define preempt_get_count() (0)
-#define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do {} while(0)
-#define preempt_enable() do { } while (0)
-#define preempt_check_resched() do { } while (0)
-
#define spin_lock(lock) _raw_spin_lock(lock)
#define spin_trylock(lock) _raw_spin_trylock(lock)
#define spin_unlock(lock) _raw_spin_unlock(lock)
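With the preempt macros relocated to preempt.h, the locking primitives themselves now carry the preemption bracketing. A sketch of what a caller gets, based on the hunks above; the demo_ names are invented, and the unlock-side pairing is inferred, since that hunk is not quoted here:

#include <linux/spinlock.h>

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	/* Under CONFIG_PREEMPT this is preempt_disable() followed by
	 * _raw_spin_lock(); without it, a bare _raw_spin_lock(). */
	spin_lock(&demo_lock);

	/* ... critical section: preemption is off either way ... */

	/* Presumably pairs with preempt_enable(), making the unlock a
	 * possible rescheduling point under CONFIG_PREEMPT. */
	spin_unlock(&demo_lock);
}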