| | | |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2005-01-07 21:59:57 -0800 |
| committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-01-07 21:59:57 -0800 |
| commit | fb8f6499abc6a847109d9602b797aa6afd2d5a3d | |
| tree | 9b23f9dde8826bb5df266ce9be81c1d51c6e804a | /include |
| parent | 8a1a48b7cd80de98d4d07ee1e78311a88c738335 | |
[PATCH] remove the BKL by turning it into a semaphore
This is the current remove-BKL patch. I test-booted it on x86 and x86-64, trying
every conceivable combination of SMP, PREEMPT and PREEMPT_BKL. All other
architectures should compile as well. (Most of the testing was done with the
zaphod patch undone, but the patch also applies cleanly to vanilla -mm3 and
should work fine there.)
This is the debugging-enabled variant of the patch, which has two main
debugging features:
- debug potentially illegal smp_processor_id() use. This has caught a number
of real bugs - e.g. see the printk.c fix in the patch. (A short sketch of the
unsafe pattern and its fix follows this list.)
- make it possible to enable/disable the BKL via a .config option. If this
goes upstream we don't want this, of course, but for now it gives
people a chance to find out whether any particular problem was caused
by this patch.
This patch has one important fix over the previous BKL patch: on PREEMPT
kernels, if BKL-using code was preempted, that code still auto-dropped the
BKL by mistake. This caused a number of breakages for testers, which went
away once the bug was fixed.
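The idea behind the fix, as a minimal sketch (simplified, with a hypothetical helper name; this is not the patch's actual scheduler code): involuntary preemption sets PREEMPT_ACTIVE in preempt_count(), so the scheduler can tell preemption apart from a voluntary sleep and leave the BKL alone.

```c
#include <linux/sched.h>	/* struct task_struct, PREEMPT_ACTIVE */
#include <linux/preempt.h>	/* preempt_count() */
#include <linux/smp_lock.h>	/* release_kernel_lock() */

/* Hypothetical helper illustrating the rule, not real kernel code. */
static void bkl_release_on_schedule(struct task_struct *tsk)
{
	/*
	 * PREEMPT_ACTIVE means we got here via preemption, not via a
	 * voluntary sleep: the task must keep holding the BKL and
	 * simply resume with it later.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		return;

	/* Voluntary sleep: auto-drop the BKL as before. */
	release_kernel_lock(tsk);
}
```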
The debugging mechanism has also been improved a lot relative to the previous
BKL patch.
It would be nice to test-drive this in -mm. There will likely be some more
smp_processor_id() false positives, but they are (1) harmless and (2) easy to
fix up. We may also find more real smp_processor_id()-related breakages.
The most noteworthy fact is that no BKL-using code has been found yet that
relies on smp_processor_id(), which is promising from a compatibility POV.
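The net/ hunks below show the annotation pattern for legal-but-flagged uses: loose statistical counters, where a rare increment on the "wrong" CPU is harmless, switch to _smp_processor_id(), the nondebug variant. A condensed sketch of the idiom (the macro name here is illustrative, not from the patch):

```c
#include <linux/smp.h>		/* _smp_processor_id() */
#include <linux/percpu.h>	/* per_cpu_ptr() */

/*
 * Illustrative macro in the style of the snmp.h/route.h changes:
 * per-CPU event counters tolerate an occasional off-CPU increment,
 * so the nondebug _smp_processor_id() is used to avoid DEBUG_PREEMPT
 * false positives.
 */
#define MY_STAT_INC(stat, field) \
	(per_cpu_ptr(stat, _smp_processor_id())->field++)
```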
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-i386/smp.h | 2 |
| -rw-r--r-- | include/asm-x86_64/smp.h | 2 |
| -rw-r--r-- | include/linux/hardirq.h | 14 |
| -rw-r--r-- | include/linux/interrupt.h | 4 |
| -rw-r--r-- | include/linux/preempt.h | 19 |
| -rw-r--r-- | include/linux/smp.h | 33 |
| -rw-r--r-- | include/linux/smp_lock.h | 14 |
| -rw-r--r-- | include/net/route.h | 2 |
| -rw-r--r-- | include/net/snmp.h | 14 |
9 files changed, 69 insertions, 35 deletions
```diff
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index ba7eccc97f19..a1e2a1de8237 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -50,7 +50,7 @@ extern u8 x86_cpu_to_apicid[];
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define smp_processor_id() (current_thread_info()->cpu)
+#define __smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
 #define cpu_possible_map cpu_callout_map
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index a0229ff0baa2..dd354e282a92 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -66,7 +66,7 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-#define smp_processor_id() read_pda(cpunumber)
+#define __smp_processor_id() read_pda(cpunumber)
 
 extern __inline int hard_smp_processor_id(void)
 {
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 833216955f4a..ba0fcb34c8cd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -61,12 +61,16 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+#else
+# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+#endif
+
+#ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 # define preemptible()	0
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
@@ -77,10 +81,10 @@ extern void synchronize_irq(unsigned int irq);
 # define synchronize_irq(irq)	barrier()
 #endif
 
-#define nmi_enter()		(preempt_count() += HARDIRQ_OFFSET)
-#define nmi_exit()		(preempt_count() -= HARDIRQ_OFFSET)
+#define nmi_enter()		irq_enter()
+#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)
 
-#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+#define irq_enter()		add_preempt_count(HARDIRQ_OFFSET)
 extern void irq_exit(void);
 
 #endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6ef18a885b96..991831cff1da 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -70,9 +70,9 @@ extern void enable_irq(unsigned int irq);
 
 /* SoftIRQ primitives.
  */
 #define local_bh_disable() \
-		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
 #define __local_bh_enable() \
-		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
+		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
 
 extern void local_bh_enable(void);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a7ad90136d64..dd98c54a23b4 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -9,17 +9,18 @@
 #include <linux/config.h>
 #include <linux/linkage.h>
 
-#define preempt_count()	(current_thread_info()->preempt_count)
+#ifdef CONFIG_DEBUG_PREEMPT
+  extern void fastcall add_preempt_count(int val);
+  extern void fastcall sub_preempt_count(int val);
+#else
+# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
+# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
+#endif
 
-#define inc_preempt_count() \
-do { \
-	preempt_count()++; \
-} while (0)
+#define inc_preempt_count() add_preempt_count(1)
+#define dec_preempt_count() sub_preempt_count(1)
 
-#define dec_preempt_count() \
-do { \
-	preempt_count()--; \
-} while (0)
+#define preempt_count()	(current_thread_info()->preempt_count)
 
 #ifdef CONFIG_PREEMPT
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a4ca8abdbedb..c438ec9880e9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -97,8 +97,10 @@ void smp_prepare_boot_cpu(void);
 /*
  * These macros fold the SMP functionality into a single CPU system
  */
-
-#define smp_processor_id() 0
+
+#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
+# define smp_processor_id() 0
+#endif
 #define hard_smp_processor_id() 0
 #define smp_threads_ready 1
 #define smp_call_function(func,info,retry,wait) ({ 0; })
@@ -109,6 +111,33 @@ static inline void smp_send_reschedule(int cpu) { }
 
 #endif /* !SMP */
 
+/*
+ * DEBUG_PREEMPT support: check whether smp_processor_id() is being
+ * used in a preemption-safe way.
+ *
+ * An architecture has to enable this debugging code explicitly.
+ * It can do so by renaming the smp_processor_id() macro to
+ * __smp_processor_id(). This should only be done after some minimal
+ * testing, because usually there are a number of false positives
+ * that an architecture will trigger.
+ *
+ * To fix a false positive (i.e. smp_processor_id() use that the
+ * debugging code reports but which use for some reason is legal),
+ * change the smp_processor_id() reference to _smp_processor_id(),
+ * which is the nondebug variant. NOTE: don't use this to hack around
+ * real bugs.
+ */
+#ifdef __smp_processor_id
+# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+   extern unsigned int smp_processor_id(void);
+# else
+#  define smp_processor_id() __smp_processor_id()
+# endif
+# define _smp_processor_id() __smp_processor_id()
+#else
+# define _smp_processor_id() smp_processor_id()
+#endif
+
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 #define put_cpu_no_resched()	preempt_enable_no_resched()
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 8a142e0311a2..b63ce7014093 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -9,15 +9,15 @@
 
 #define kernel_locked()		(current->lock_depth >= 0)
 
-extern int __lockfunc get_kernel_lock(void);
-extern void __lockfunc put_kernel_lock(void);
+extern int __lockfunc __reacquire_kernel_lock(void);
+extern void __lockfunc __release_kernel_lock(void);
 
 /*
  * Release/re-acquire global kernel lock for the scheduler
  */
 #define release_kernel_lock(tsk) do {		\
 	if (unlikely((tsk)->lock_depth >= 0))	\
-		put_kernel_lock();		\
+		__release_kernel_lock();	\
 } while (0)
 
 /*
@@ -26,16 +26,16 @@ extern void __lockfunc put_kernel_lock(void);
  * reacquire_kernel_lock() so that the compiler can see
  * it at compile-time.
  */
-#ifdef CONFIG_SMP
-#define return_value_on_smp return
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
+# define return_value_on_smp return
 #else
-#define return_value_on_smp
+# define return_value_on_smp
 #endif
 
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp get_kernel_lock();
+		return_value_on_smp __reacquire_kernel_lock();
 	return 0;
 }
diff --git a/include/net/route.h b/include/net/route.h
index 5e0100185d95..6228a91777dc 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -105,7 +105,7 @@ struct rt_cache_stat
 
 extern struct rt_cache_stat *rt_cache_stat;
 
 #define RT_CACHE_STAT_INC(field)	\
-	(per_cpu_ptr(rt_cache_stat, smp_processor_id())->field++)
+	(per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
 
 extern struct ip_rt_acct *ip_rt_acct;
diff --git a/include/net/snmp.h b/include/net/snmp.h
index b7068876e0eb..a15ab256276e 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -128,18 +128,18 @@ struct linux_mib {
 #define SNMP_STAT_USRPTR(name)	(name[1])
 
 #define SNMP_INC_STATS_BH(mib, field)	\
-	(per_cpu_ptr(mib[0], smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset)	\
-	(per_cpu_ptr(mib[0], smp_processor_id())->mibs[field + (offset)]++)
+	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
 #define SNMP_INC_STATS_USER(mib, field)	\
-	(per_cpu_ptr(mib[1], smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS(mib, field)	\
-	(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
 #define SNMP_DEC_STATS(mib, field)	\
-	(per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]--)
+	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
 #define SNMP_ADD_STATS_BH(mib, field, addend)	\
-	(per_cpu_ptr(mib[0], smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
 #define SNMP_ADD_STATS_USER(mib, field, addend)	\
-	(per_cpu_ptr(mib[1], smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
 
 #endif
```
