author     Ingo Molnar <mingo@elte.hu>                       2002-07-21 02:11:12 -0700
committer  Linus Torvalds <torvalds@home.transmeta.com>      2002-07-21 02:11:12 -0700
commit     ae86a80aed1e269d435c70f6e85deb80e8f8be98 (patch)
tree       c0c5b816da7b3a3102f159c335745ae9b01883c1 /include
parent     3d37e1e6171f8cbd81e442524d4dd231b8cbf5d1 (diff)
[PATCH] "big IRQ lock" removal, IRQ cleanups
This is a massive cleanup of the IRQ subsystem. It's loosely based on Linus' original idea and DaveM's original implementation, to fold our various irq, softirq and bh counters into the preemption counter. With this approach it was possible:

- to remove the 'big IRQ lock' on SMP - on which sti() and cli() relied.
- to streamline/simplify arch/i386/kernel/irq.c significantly.
- to simplify the softirq code.
- to remove the preemption count increase/decrease code from the lowlevel IRQ assembly code.
- to speed up schedule() a bit.

Global sti() and cli() are gone forever on SMP; there is no more globally synchronizing irq-disabling capability. All code that relied on sti(), cli() and restore_flags() must use other locking mechanisms from now on (spinlocks and __cli()/__sti()).

Obviously this patch breaks massive amounts of code, so only limited .configs are working at the moment (UP is expected to be unaffected, but SMP will require various driver updates). The patch was developed and tested on SMP systems, and while the code is still a bit rough in places, the base IRQ code appears to be pretty robust and clean. It boots already, so the worst is over, but there is lots of work left: e.g. fixing the serial layer to not use cli()/sti() and bhs ...
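The core trick is visible in the hardirq.h hunk below: the old per-CPU irq/bh counters become multiples of IRQ_OFFSET inside the single preempt_count. A minimal userspace model of that encoding (illustrative only; the PREEMPT_ACTIVE value here is an assumption, the real one lives in the arch headers):

/* Model: preemption-disable depth lives in the low bits, every
 * hardirq/softirq entry adds IRQ_OFFSET, so one integer answers both
 * "may we preempt?" and "are we in interrupt context?". */
#include <assert.h>

#define IRQ_OFFSET     64
#define PREEMPT_ACTIVE 0x4000000	/* assumed flag value, for illustration */

static int preempt_count;

static int in_interrupt(void)
{
	return (preempt_count & ~PREEMPT_ACTIVE) >= IRQ_OFFSET;
}

int main(void)
{
	preempt_count++;		/* preempt_disable(): no irq context yet */
	assert(!in_interrupt());
	preempt_count += IRQ_OFFSET;	/* irq_enter() */
	assert(in_interrupt());
	preempt_count -= IRQ_OFFSET;	/* irq_exit() */
	preempt_count--;		/* preempt_enable_no_resched() */
	assert(!in_interrupt() && preempt_count == 0);
	return 0;
}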
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/smplock.h  |  3
-rw-r--r--  include/asm-i386/hardirq.h     | 76
-rw-r--r--  include/asm-i386/smplock.h     | 14
-rw-r--r--  include/asm-i386/softirq.h     | 55
-rw-r--r--  include/asm-i386/system.h      | 26
-rw-r--r--  include/linux/irq_cpustat.h    |  2
-rw-r--r--  include/linux/preempt.h        | 46
-rw-r--r--  include/linux/smp_lock.h       |  2
-rw-r--r--  include/linux/spinlock.h       | 37
9 files changed, 88 insertions, 173 deletions
diff --git a/include/asm-generic/smplock.h b/include/asm-generic/smplock.h
index 96565069c988..d77431f2cb25 100644
--- a/include/asm-generic/smplock.h
+++ b/include/asm-generic/smplock.h
@@ -13,11 +13,10 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
+#define release_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
__sti(); \
} while (0)
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index ed73b0444f6b..cbd78d1dcaa1 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -8,8 +8,6 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
unsigned long idle_timestamp;
@@ -18,77 +16,27 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define IRQ_OFFSET 64
+
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-
-#ifndef CONFIG_SMP
-
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
+#define in_interrupt() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
-#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
-#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
+#define in_irq in_interrupt
-#define synchronize_irq() barrier()
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
-#define release_irqlock(cpu) do { } while (0)
+#define irq_enter() (preempt_count() += IRQ_OFFSET)
+#define irq_exit() (preempt_count() -= IRQ_OFFSET)
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq) barrier()
#else
-
-#include <asm/atomic.h>
-#include <asm/smp.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock; /* long for set_bit -RR */
-
-static inline int irqs_running (void)
-{
- int i;
-
- for (i = 0; i < NR_CPUS; i++)
- if (local_irq_count(i))
- return 1;
- return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
- /* if we didn't own the irq lock, just ignore.. */
- if (global_irq_holder == (unsigned char) cpu) {
- global_irq_holder = NO_PROC_ID;
- clear_bit(0,&global_irq_lock);
- }
-}
-
-static inline void irq_enter(int cpu, int irq)
-{
- ++local_irq_count(cpu);
-
- while (test_bit(0,&global_irq_lock)) {
- cpu_relax();
- }
-}
-
-static inline void irq_exit(int cpu, int irq)
-{
- --local_irq_count(cpu);
-}
-
-static inline int hardirq_trylock(int cpu)
-{
- return !local_irq_count(cpu) && !test_bit(0,&global_irq_lock);
-}
-
-#define hardirq_endlock(cpu) do { } while (0)
-
-extern void synchronize_irq(void);
-
+ extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
#endif /* __ASM_HARDIRQ_H */
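Note the signature change at the bottom of this hunk: synchronize_irq() now takes an irq number, because without the global IRQ lock there is nothing to wait on globally. A hedged usage sketch - the device structure and field names are hypothetical:

/* Teardown pattern under the new scheme: wait until no CPU is still
 * running this particular IRQ's handler before freeing its data. */
static void example_teardown(struct example_dev *dev)
{
	free_irq(dev->irq, dev);	/* unregister the handler */
	synchronize_irq(dev->irq);	/* wait out any running instance */
	kfree(dev->buffer);		/* now safe: no handler can see it */
}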
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
index 888a7b82f103..8bee3fd434c0 100644
--- a/include/asm-i386/smplock.h
+++ b/include/asm-i386/smplock.h
@@ -12,15 +12,9 @@ extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
-#define check_irq_holder(cpu) \
-do { \
- if (global_irq_holder == (cpu)) \
- BUG(); \
-} while(0)
#else
#ifdef CONFIG_PREEMPT
-#define kernel_locked() preempt_get_count()
-#define check_irq_holder(cpu) do { } while(0)
+#define kernel_locked() preempt_count()
#else
#define kernel_locked() 1
#endif
@@ -29,12 +23,10 @@ do { \
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
+#define release_kernel_lock(task) \
do { \
- if (unlikely(task->lock_depth >= 0)) { \
+ if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
- check_irq_holder(cpu); \
- } \
} while (0)
/*
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index c62cbece6ce7..c28019d821af 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -1,50 +1,27 @@
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H
-#include <asm/atomic.h>
+#include <linux/preempt.h>
#include <asm/hardirq.h>
-#define __cpu_bh_enable(cpu) \
- do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
-#define cpu_bh_disable(cpu) \
- do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
+#define local_bh_disable() \
+ do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+ do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
-/*
- * NOTE: this assembly code assumes:
- *
- * (char *)&local_bh_count - 8 == (char *)&softirq_pending
- *
- * If you change the offsets in irq_stat then you have to
- * update this code as well.
- */
-#define _local_bh_enable() \
+#define local_bh_enable() \
do { \
- unsigned int *ptr = &local_bh_count(smp_processor_id()); \
- \
- barrier(); \
- if (!--*ptr) \
- __asm__ __volatile__ ( \
- "cmpl $0, -8(%0);" \
- "jnz 2f;" \
- "1:;" \
- \
- LOCK_SECTION_START("") \
- "2: pushl %%eax; pushl %%ecx; pushl %%edx;" \
- "call %c1;" \
- "popl %%edx; popl %%ecx; popl %%eax;" \
- "jmp 1b;" \
- LOCK_SECTION_END \
- \
- : /* no output */ \
- : "r" (ptr), "i" (do_softirq) \
- /* no registers clobbered */ ); \
+ if (unlikely((preempt_count() == IRQ_OFFSET) && \
+ softirq_pending(smp_processor_id()))) { \
+ __local_bh_enable(); \
+ do_softirq(); \
+ preempt_check_resched(); \
+ } else { \
+ __local_bh_enable(); \
+ preempt_check_resched(); \
+ } \
} while (0)
-#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
+#define in_softirq() in_interrupt()
#endif /* __ASM_SOFTIRQ_H */
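The branch in the new local_bh_enable() is worth spelling out: pending softirqs run only when the count drops from exactly IRQ_OFFSET, i.e. in the outermost bh-disabled section outside hard interrupt context. A runnable userspace model of just that decision (a sketch, not kernel code):

#include <stdio.h>

#define IRQ_OFFSET 64

static int preempt_count;
static int softirq_pending = 1;	/* pretend a softirq was raised */

static void do_softirq(void)
{
	softirq_pending = 0;
	puts("pending softirqs run here");
}

static void local_bh_disable(void) { preempt_count += IRQ_OFFSET; }

static void local_bh_enable(void)
{
	/* Only the outermost section (count exactly IRQ_OFFSET) with work
	 * pending calls do_softirq(); nested enables just decrement. */
	if (preempt_count == IRQ_OFFSET && softirq_pending) {
		preempt_count -= IRQ_OFFSET;
		do_softirq();
	} else {
		preempt_count -= IRQ_OFFSET;
	}
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();	/* nested */
	local_bh_enable();	/* count was 2*IRQ_OFFSET: nothing runs */
	local_bh_enable();	/* outermost: runs the pending work */
	return 0;
}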
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 851f090e4394..7d9fa1282e26 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -324,24 +324,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
-#ifdef CONFIG_SMP
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
-#else
-
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-
+/*
+ * Compatibility macros - they will be removed after some time.
+ */
+#if !CONFIG_SMP
+# define sti() __sti()
+# define cli() __cli()
+# define save_flags(flags) __save_flags(flags)
+# define restore_flags(flags) __restore_flags(flags)
#endif
/*
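This is the conversion the commit message demands of drivers: a global cli()/sti() pair becomes a real spinlock with local interrupt disabling. A hedged sketch - the lock name and the guarded state are illustrative:

/* On SMP nothing disables interrupts on other CPUs any more, so
 * correctness must come from the lock itself, not from cli(). */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

void example_update(void)
{
	unsigned long flags;

	/* old: save_flags(flags); cli(); ...; restore_flags(flags); */
	spin_lock_irqsave(&example_lock, flags);
	/* touch state formerly guarded by the global IRQ lock */
	spin_unlock_irqrestore(&example_lock, flags);
}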
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
index dfd73c5ec60d..6eab29be1d61 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -29,8 +29,6 @@ extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
/* arch independent irq_stat fields */
#define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending)
-#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
-#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count)
#define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count)
#define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task)
/* arch dependent irq_stat fields */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
new file mode 100644
index 000000000000..172471f0dbde
--- /dev/null
+++ b/include/linux/preempt.h
@@ -0,0 +1,46 @@
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+#include <linux/config.h>
+
+#define preempt_count() (current_thread_info()->preempt_count)
+
+#ifdef CONFIG_PREEMPT
+
+extern void preempt_schedule(void);
+
+#define preempt_disable() \
+do { \
+ preempt_count()++; \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+ preempt_count()--; \
+ barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+ preempt_enable_no_resched(); \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+} while (0)
+
+#else
+
+#define preempt_disable() do { } while (0)
+#define preempt_enable_no_resched() do {} while(0)
+#define preempt_enable() do { } while (0)
+#define preempt_check_resched() do { } while (0)
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
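A typical use of the new interface is pinning a task to its CPU while it touches per-CPU state, since smp_processor_id() is only stable while preemption is off. A sketch; the stats array is hypothetical:

struct example_stats { unsigned long packets; };
static struct example_stats example_stats[NR_CPUS];

void example_count_packet(void)
{
	preempt_disable();
	example_stats[smp_processor_id()].packets++;
	preempt_enable();	/* reschedules here if TIF_NEED_RESCHED is set */
}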
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 13d8c7ace0bb..cfb23f363e61 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -7,7 +7,7 @@
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
-#define release_kernel_lock(task, cpu) do { } while(0)
+#define release_kernel_lock(task) do { } while(0)
#define reacquire_kernel_lock(task) do { } while(0)
#define kernel_locked() 1
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 194541968c6a..d9f4af4103e4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -2,6 +2,7 @@
#define __LINUX_SPINLOCK_H
#include <linux/config.h>
+#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
@@ -120,36 +121,6 @@
#ifdef CONFIG_PREEMPT
-asmlinkage void preempt_schedule(void);
-
-#define preempt_get_count() (current_thread_info()->preempt_count)
-
-#define preempt_disable() \
-do { \
- ++current_thread_info()->preempt_count; \
- barrier(); \
-} while (0)
-
-#define preempt_enable_no_resched() \
-do { \
- --current_thread_info()->preempt_count; \
- barrier(); \
-} while (0)
-
-#define preempt_enable() \
-do { \
- --current_thread_info()->preempt_count; \
- barrier(); \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
#define spin_lock(lock) \
do { \
preempt_disable(); \
@@ -179,12 +150,6 @@ do { \
#else
-#define preempt_get_count() (0)
-#define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do {} while(0)
-#define preempt_enable() do { } while (0)
-#define preempt_check_resched() do { } while (0)
-
#define spin_lock(lock) _raw_spin_lock(lock)
#define spin_trylock(lock) _raw_spin_trylock(lock)
#define spin_unlock(lock) _raw_spin_unlock(lock)
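The spin_lock() half of the CONFIG_PREEMPT pairing appears in the hunk above; its inverse is presumably the symmetric unlock path. A sketch of the pairing this patch relies on:

/* Preemption goes off before the raw acquire and comes back on (with a
 * resched check inside preempt_enable()) only after the raw release,
 * so a task can never be preempted while holding a spinlock. */
#define spin_lock(lock) \
do { \
	preempt_disable(); \
	_raw_spin_lock(lock); \
} while (0)

#define spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable(); \
} while (0)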