|  |  |  |
|---|---|---|
| author | David S. Miller <davem@nuts.ninka.net> | 2002-12-04 20:47:20 -0800 |
| committer | David S. Miller <davem@nuts.ninka.net> | 2002-12-04 20:47:20 -0800 |
| commit | a091cf751c3cb07efdfbf74a8c89cd0ba32fc5b8 (patch) | |
| tree | c387011ddd6e5f0eaa6110a484f36c56250baacb /include | |
| parent | 538a0f61ecefac405d05c9f32349869f2be9829e (diff) | |
[SPINLOCK]: Fix non-SMP nopping spin/rwlock macros.
Diffstat (limited to 'include')
|  |  |  |
|---|---|---|
| -rw-r--r-- | include/linux/spinlock.h | 278 |

1 file changed, 193 insertions, 85 deletions
```diff
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 749d3054b2dc..0fe9a764520c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,52 +1,23 @@
 #ifndef __LINUX_SPINLOCK_H
 #define __LINUX_SPINLOCK_H
 
+/*
+ * include/linux/spinlock.h - generic locking declarations
+ */
+
 #include <linux/config.h>
 #include <linux/preempt.h>
 #include <linux/linkage.h>
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
+#include <linux/stringify.h>
 
 #include <asm/system.h>
 
 /*
- * These are the generic versions of the spinlocks and read-write
- * locks..
+ * Must define these before including other files, inline functions need them
  */
-#define spin_lock_irqsave(lock, flags)  do { local_irq_save(flags); spin_lock(lock); } while (0)
-#define spin_lock_irq(lock)             do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock)              do { local_bh_disable(); spin_lock(lock); } while (0)
-
-#define read_lock_irqsave(lock, flags)  do { local_irq_save(flags); read_lock(lock); } while (0)
-#define read_lock_irq(lock)             do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock)              do { local_bh_disable(); read_lock(lock); } while (0)
-
-#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
-#define write_lock_irq(lock)            do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock)             do { local_bh_disable(); write_lock(lock); } while (0)
-
-#define spin_unlock_irqrestore(lock, flags)      do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock)           do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define spin_unlock_bh(lock)            do { spin_unlock(lock); local_bh_enable(); } while (0)
-
-#define read_unlock_irqrestore(lock, flags)      do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define read_unlock_irq(lock)           do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define read_unlock_bh(lock)            do { read_unlock(lock); local_bh_enable(); } while (0)
-
-#define write_unlock_irqrestore(lock, flags)     do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define write_unlock_irq(lock)          do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define write_unlock_bh(lock)           do { write_unlock(lock); local_bh_enable(); } while (0)
-#define spin_trylock_bh(lock)           ({ int __r; local_bh_disable();\
-                                        __r = spin_trylock(lock);      \
-                                        if (!__r) local_bh_enable();   \
-                                        __r; })
-
-/* Must define these before including other files, inline functions need them */
-
-#include <linux/stringify.h>
-
 #define LOCK_SECTION_NAME \
         ".text.lock." __stringify(KBUILD_BASENAME)
@@ -60,11 +31,17 @@
 #define LOCK_SECTION_END \
         ".previous\n\t"
 
+/*
+ * If CONFIG_SMP is set, pull in the _raw_* definitions
+ */
 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>
 
-#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
-                                  defined (e.g. by including asm/spinlock.h */
+/*
+ * !CONFIG_SMP and spin_lock_init not previously defined
+ * (e.g. by including include/asm/spinlock.h)
+ */
+#elif !defined(spin_lock_init)
 
 #ifndef CONFIG_PREEMPT
 # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
@@ -72,55 +49,42 @@
 #endif
 
 /*
- * Your basic spinlocks, allowing only a single CPU anywhere
- *
- * Most gcc versions have a nasty bug with empty initializers.
+ * gcc versions before ~2.95 have a nasty bug with empty initializers.
  */
 #if (__GNUC__ > 2)
   typedef struct { } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+  typedef struct { } rwlock_t;
+  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+  #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
   typedef struct { int gcc_is_buggy; } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+  typedef struct { int gcc_is_buggy; } rwlock_t;
+  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 #endif
 
+/*
+ * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ */
 #define spin_lock_init(lock)    do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock)    (void)(lock) /* Not "unused variable". */
+#define _raw_spin_lock(lock)    do { (void)(lock); } while(0)
 #define spin_is_locked(lock)    ((void)(lock), 0)
 #define _raw_spin_trylock(lock) ((void)(lock), 1)
 #define spin_unlock_wait(lock)  do { (void)(lock); } while(0)
 #define _raw_spin_unlock(lock)  do { (void)(lock); } while(0)
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * Most gcc versions have a nasty bug with empty initializers.
- */
-#if (__GNUC__ > 2)
-  typedef struct { } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { }
-#else
-  typedef struct { int gcc_is_buggy; } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#endif
-
-#define rwlock_init(lock)       do { } while(0)
-#define _raw_read_lock(lock)    (void)(lock) /* Not "unused variable". */
-#define _raw_read_unlock(lock)  do { } while(0)
-#define _raw_write_lock(lock)   (void)(lock) /* Not "unused variable". */
-#define _raw_write_unlock(lock) do { } while(0)
+#define rwlock_init(lock)       do { (void)(lock); } while(0)
+#define _raw_read_lock(lock)    do { (void)(lock); } while(0)
+#define _raw_read_unlock(lock)  do { (void)(lock); } while(0)
+#define _raw_write_lock(lock)   do { (void)(lock); } while(0)
+#define _raw_write_unlock(lock) do { (void)(lock); } while(0)
 
 #endif /* !SMP */
 
-#ifdef CONFIG_PREEMPT
-
+/*
+ * Define the various spin_lock and rw_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
 #define spin_lock(lock) \
 do { \
         preempt_disable(); \
@@ -129,31 +93,175 @@ do { \
 #define spin_trylock(lock)      ({preempt_disable(); _raw_spin_trylock(lock) ? \
                                 1 : ({preempt_enable(); 0;});})
+
 #define spin_unlock(lock) \
 do { \
         _raw_spin_unlock(lock); \
         preempt_enable(); \
 } while (0)
 
-#define read_lock(lock)         ({preempt_disable(); _raw_read_lock(lock);})
-#define read_unlock(lock)       ({_raw_read_unlock(lock); preempt_enable();})
-#define write_lock(lock)        ({preempt_disable(); _raw_write_lock(lock);})
-#define write_unlock(lock)      ({_raw_write_unlock(lock); preempt_enable();})
+#define read_lock(lock) \
+do { \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while(0)
+
+#define read_unlock(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        preempt_enable(); \
+} while(0)
+
+#define write_lock(lock) \
+do { \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while(0)
+
+#define write_unlock(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        preempt_enable(); \
+} while(0)
+
 #define write_trylock(lock)     ({preempt_disable();_raw_write_trylock(lock) ? \
                                 1 : ({preempt_enable(); 0;});})
 
-#else
+#define spin_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
 
-#define spin_lock(lock)         _raw_spin_lock(lock)
-#define spin_trylock(lock)      _raw_spin_trylock(lock)
-#define spin_unlock(lock)       _raw_spin_unlock(lock)
+#define spin_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
 
-#define read_lock(lock)         _raw_read_lock(lock)
-#define read_unlock(lock)       _raw_read_unlock(lock)
-#define write_lock(lock)        _raw_write_lock(lock)
-#define write_unlock(lock)      _raw_write_unlock(lock)
-#define write_trylock(lock)     _raw_write_trylock(lock)
-#endif
+#define spin_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
+
+#define read_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define write_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_restore(flags); \
+} while (0)
+
+#define spin_unlock_irq(lock) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define spin_unlock_bh(lock) \
+do { \
+        _raw_spin_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define read_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_read_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define read_unlock_irq(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define read_unlock_bh(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_write_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define write_unlock_irq(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define write_unlock_bh(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define spin_trylock_bh(lock)   ({ local_bh_disable(); preempt_disable(); \
+                                _raw_spin_trylock(lock) ? 1 : \
+                                ({preempt_enable(); local_bh_enable(); 0;});})
 
 /* "lock on reference count zero" */
 #ifndef ATOMIC_DEC_AND_LOCK
```
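A central detail of the change is visible in the `_raw_*` nops: the old UP definitions expanded to a bare `(void)(lock)` expression, while the new ones expand to `do { (void)(lock); } while(0)` statements, so they compose safely inside the new multi-line wrappers and after un-braced `if`/`else`, while `(void)(lock)` still avoids "unused variable" warnings. A minimal user-space sketch of that idiom follows; the toy names (`toy_lock_t`, `TOY_*`) are hypothetical and not kernel code:

```c
#include <stdio.h>

typedef struct { int dummy; } toy_lock_t;

/* Expression-style nop, in the spirit of the old !SMP definitions. */
#define TOY_RAW_LOCK_EXPR(lock)	(void)(lock)

/* Statement-style nop, in the spirit of the patched definitions:
 * exactly one statement, so it nests inside other macros and after
 * an un-braced if/else, and (void)(lock) still silences "unused
 * variable" warnings. */
#define TOY_RAW_LOCK_STMT(lock)	do { (void)(lock); } while (0)

/* A composite wrapper in the style of the new spin_lock_irqsave():
 * it relies on the raw op behaving like a single statement. */
#define TOY_LOCK_IRQSAVE(lock, flags) \
do { \
	(flags) = 1;		/* stand-in for local_irq_save() */ \
	TOY_RAW_LOCK_STMT(lock); \
} while (0)

int main(void)
{
	toy_lock_t l;
	unsigned long flags = 0;

	if (1)
		TOY_LOCK_IRQSAVE(&l, flags);	/* expands to one statement */
	else
		puts("never reached");

	printf("flags = %lu\n", flags);
	return 0;
}
```

The `do { ... } while (0)` form is the standard way to make a macro behave like a single statement; this sketch only illustrates the idiom, not the exact failure mode the patch addresses.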

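For a rough picture of what the patched header boils down to on a uniprocessor CONFIG_PREEMPT build, here is a stand-alone mock. The `preempt_count` counter and the `preempt_disable()`/`preempt_enable()` stand-ins are assumptions for illustration, not the kernel implementation; the point is that the raw ops are nops, yet every `spin_lock()`/`spin_unlock()` pair still brackets the critical section with preempt-count changes:

```c
#include <stdio.h>

/* User-space stand-ins for the preemption counter (assumption only). */
static int preempt_count;
#define preempt_disable()	(preempt_count++)
#define preempt_enable()	(preempt_count--)

/* The __GNUC__ <= 2 fallback shapes from the header. */
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

/* !SMP nops, as in the patched header. */
#define _raw_spin_lock(lock)	do { (void)(lock); } while (0)
#define _raw_spin_unlock(lock)	do { (void)(lock); } while (0)

/* The wrappers are defined unconditionally: even on UP, lock and
 * unlock still disable and re-enable preemption. */
#define spin_lock(lock) \
do { \
	preempt_disable(); \
	_raw_spin_lock(lock); \
} while (0)

#define spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable(); \
} while (0)

int main(void)
{
	spinlock_t lock = SPIN_LOCK_UNLOCKED;

	spin_lock(&lock);
	printf("locked:   preempt_count = %d\n", preempt_count);	/* 1 */
	spin_unlock(&lock);
	printf("unlocked: preempt_count = %d\n", preempt_count);	/* 0 */
	return 0;
}
```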