| field | value | date |
|---|---|---|
| author | Martin Dalecki <dalecki@evision-ventures.com> | 2002-06-08 21:51:57 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-06-08 21:51:57 -0700 |
| commit | eada9464889e4dab0f67b9f7533ad8a926186fcf | |
| tree | c69e2848391df91fe9e4c6f9f2a0c36f051ebffc /include | |
| parent | 467b45ea6236af91d8119422b19e4b1f8c4a67d6 | |
[PATCH] 2.5.20 locks.h cleanup
Since I have been looking into the locking issues recently,
the following rather trivial collection of garbage code became
obvious:
- Remove "not yet used" code from 1995 in asm/locks.h. It's garbage.
- Remove the useless DEBUG_SPINLOCKS code from the generic linux/spinlock.h.
Just compiling for SMP already does the trick.
- Replace all uses of SPINLOCK_DEBUG with the now-global
CONFIG_DEBUG_SPINLOCK.
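To make the mechanical part of the change concrete, here is a minimal, compilable sketch of the idiom being replaced. The typedef names are illustrative; only the macro names come from the patch below:

```c
/* spinlock_debug_sketch.c — compile with: cc -c spinlock_debug_sketch.c
 *                           [-DCONFIG_DEBUG_SPINLOCK]                    */

/* Old style: a per-header shim turned the config symbol into a 0/1 macro. */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG 1
#else
#define SPINLOCK_DEBUG 0
#endif

typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG			/* old: test the derived macro */
	unsigned magic;			/* debug-only consistency field */
#endif
} old_spinlock_t;

/* New style: a bool Kconfig option is either #defined (=y) or absent,
 * so #ifdef on the config symbol itself is the natural test and the
 * intermediate macro can be dropped. */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK		/* new: test the config symbol directly */
	unsigned magic;
#endif
} new_spinlock_t;
```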
Diffstat (limited to 'include')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-cris/locks.h | 133 |
| -rw-r--r-- | include/asm-i386/locks.h | 135 |
| -rw-r--r-- | include/asm-i386/spinlock.h | 28 |
| -rw-r--r-- | include/asm-ppc/spinlock.h | 14 |
| -rw-r--r-- | include/asm-x86_64/locks.h | 135 |
| -rw-r--r-- | include/asm-x86_64/spinlock.h | 26 |
| -rw-r--r-- | include/linux/spinlock.h | 48 |
7 files changed, 27 insertions, 492 deletions
```diff
diff --git a/include/asm-cris/locks.h b/include/asm-cris/locks.h
deleted file mode 100644
index a075e92bd7ed..000000000000
--- a/include/asm-cris/locks.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- * Alan Cox, alan@cymru.net, 1995
- */
-
-/*
- * This would be much easier but far less clear and easy
- * to borrow for other processors if it was just assembler.
- */
-
-extern __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				while(lock_clear_bit(processor,&smp_invalidate_needed))
-					local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-extern __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		lock_clear_bit(0,&sp->lock);sp->cpu= NO_PROC_ID;
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;		/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-extern __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-extern __inline__ void spinunlock(struct spinlock *sp)
-{
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->oldpri;
-	}
-}
-
-extern __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-extern __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
diff --git a/include/asm-i386/locks.h b/include/asm-i386/locks.h
deleted file mode 100644
index ffcab0afb658..000000000000
--- a/include/asm-i386/locks.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- * Alan Cox, alan@redhat.com, 1995
- */
-
-/*
- * This would be much easier but far less clear and easy
- * to borrow for other processors if it was just assembler.
- */
-
-static __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				while(lock_clear_bit(processor,&smp_invalidate_needed))
-					local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-static __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-static __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;		/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-static __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-static __inline__ void spinunlock(struct spinlock *sp)
-{
-	int pri;
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	pri=sp->oldpri;
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->pri;
-	}
-}
-
-static __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-static __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 1a4b4879c358..678a64fed800 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -9,30 +9,20 @@
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC	0xdead4ead
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
 #else
 #define SPINLOCK_MAGIC_INIT	/* */
@@ -79,7 +69,7 @@ typedef struct {
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -100,7 +90,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 	char oldval = 1;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -125,7 +115,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	__label__ here;
 here:
 	if (lock->magic != SPINLOCK_MAGIC) {
@@ -151,14 +141,14 @@ printk("eip: %p\n", &&here);
  */
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC	0xdeaf1eed
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
 #else
 #define RWLOCK_MAGIC_INIT	/* */
@@ -181,7 +171,7 @@ typedef struct {
 
 static inline void _raw_read_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
@@ -190,7 +180,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
 
 static inline void _raw_write_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index f5d454bc5275..b7dafa9a36a5 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -7,22 +7,20 @@
 #include <asm/system.h>
 #include <asm/processor.h>
 
-#undef SPINLOCK_DEBUG
-
 /*
  * Simple spin lock operations.
  */
 
 typedef struct {
 	volatile unsigned long lock;
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	volatile unsigned long owner_pc;
 	volatile unsigned long owner_cpu;
 #endif
 } spinlock_t;
 
 #ifdef __KERNEL__
-#if SPINLOCK_DEBUG
+#if CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_DEBUG_INIT     , 0, 0
 #else
 #define SPINLOCK_DEBUG_INIT     /* */
@@ -34,7 +32,7 @@ typedef struct {
 #define spin_is_locked(x)	((x)->lock != 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
-#ifndef SPINLOCK_DEBUG
+#ifndef CONFIG_DEBUG_SPINLOCK
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
@@ -88,12 +86,12 @@ extern unsigned long __spin_trylock(volatile unsigned long *lock);
  */
 typedef struct {
 	volatile unsigned long lock;
-#ifdef SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	volatile unsigned long owner_pc;
 #endif
 } rwlock_t;
 
-#if SPINLOCK_DEBUG
+#if CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_DEBUG_INIT     , 0
 #else
 #define RWLOCK_DEBUG_INIT     /* */
@@ -102,7 +100,7 @@ typedef struct {
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
 
-#ifndef SPINLOCK_DEBUG
+#ifndef CONFIG_DEBUG_SPINLOCK
 
 static __inline__ void _raw_read_lock(rwlock_t *rw)
 {
diff --git a/include/asm-x86_64/locks.h b/include/asm-x86_64/locks.h
deleted file mode 100644
index 1cc171f9b600..000000000000
--- a/include/asm-x86_64/locks.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * SMP locks primitives for building ix86 locks
- * (not yet used).
- *
- * Alan Cox, alan@redhat.com, 1995
- */
-
-/*
- * This would be much easier but far less clear and easy
- * to borrow for other processors if it was just assembler.
- */
-
-extern __inline__ void prim_spin_lock(struct spinlock *sp)
-{
-	int processor=smp_processor_id();
-
-	/*
-	 *	Grab the lock bit
-	 */
-
-	while(lock_set_bit(0,&sp->lock))
-	{
-		/*
-		 *	Failed, but that's cos we own it!
-		 */
-
-		if(sp->cpu==processor)
-		{
-			sp->users++;
-			return 0;
-		}
-		/*
-		 *	Spin in the cache S state if possible
-		 */
-		while(sp->lock)
-		{
-			/*
-			 *	Wait for any invalidates to go off
-			 */
-
-			if(smp_invalidate_needed&(1<<processor))
-				while(lock_clear_bit(processor,&smp_invalidate_needed))
-					local_flush_tlb();
-			sp->spins++;
-		}
-		/*
-		 *	Someone wrote the line, we go 'I' and get
-		 *	the cache entry. Now try to regrab
-		 */
-	}
-	sp->users++;sp->cpu=processor;
-	return 1;
-}
-
-/*
- *	Release a spin lock
- */
-
-extern __inline__ int prim_spin_unlock(struct spinlock *sp)
-{
-	/* This is safe. The decrement is still guarded by the lock. A multilock would
-	   not be safe this way */
-	if(!--sp->users)
-	{
-		sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
-		return 1;
-	}
-	return 0;
-}
-
-
-/*
- *	Non blocking lock grab
- */
-
-extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
-{
-	if(lock_set_bit(0,&sp->lock))
-		return 0;		/* Locked already */
-	sp->users++;
-	return 1;		/* We got the lock */
-}
-
-
-/*
- *	These wrap the locking primitives up for usage
- */
-
-extern __inline__ void spinlock(struct spinlock *sp)
-{
-	if(sp->priority<current->lock_order)
-		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
-	if(prim_spin_lock(sp))
-	{
-		/*
-		 *	We got a new lock. Update the priority chain
-		 */
-		sp->oldpri=current->lock_order;
-		current->lock_order=sp->priority;
-	}
-}
-
-extern __inline__ void spinunlock(struct spinlock *sp)
-{
-	int pri;
-	if(current->lock_order!=sp->priority)
-		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
-	pri=sp->oldpri;
-	if(prim_spin_unlock(sp))
-	{
-		/*
-		 *	Update the debugging lock priority chain. We dumped
-		 *	our last right to the lock.
-		 */
-		current->lock_order=sp->pri;
-	}
-}
-
-extern __inline__ void spintestlock(struct spinlock *sp)
-{
-	/*
-	 *	We do no sanity checks, it's legal to optimistically
-	 *	get a lower lock.
-	 */
-	prim_spin_lock_nb(sp);
-}
-
-extern __inline__ void spintestunlock(struct spinlock *sp)
-{
-	/*
-	 *	A testlock doesn't update the lock chain so we
-	 *	must not update it on free
-	 */
-	prim_spin_unlock(sp);
-}
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index b68101e609e1..6f1d71c65a68 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -9,30 +9,20 @@
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC	0xdead4ead
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
 #else
 #define SPINLOCK_MAGIC_INIT	/* */
@@ -82,7 +72,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 
 static inline void _raw_spin_lock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	__label__ here;
 here:
 	if (lock->magic != SPINLOCK_MAGIC) {
@@ -97,7 +87,7 @@ printk("eip: %p\n", &&here);
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (lock->magic != SPINLOCK_MAGIC)
 		BUG();
 	if (!spin_is_locked(lock))
@@ -120,14 +110,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  */
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned magic;
 #endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC	0xdeaf1eed
 
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
 #else
 #define RWLOCK_MAGIC_INIT	/* */
@@ -150,7 +140,7 @@ typedef struct {
 
 extern inline void _raw_read_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
@@ -159,7 +149,7 @@ extern inline void _raw_read_lock(rwlock_t *rw)
 
 static inline void _raw_write_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
+#ifdef CONFIG_DEBUG_SPINLOCK
 	if (rw->magic != RWLOCK_MAGIC)
 		BUG();
 #endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e46232e8e126..0e9f7247bc86 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -64,13 +64,9 @@
 #elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
 				  defined (e.g. by including asm/spinlock.h */
 
-#define DEBUG_SPINLOCKS	0	/* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
-
-#if (DEBUG_SPINLOCKS < 1)
-
 #ifndef CONFIG_PREEMPT
-#define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-#define ATOMIC_DEC_AND_LOCK
+# define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
+# define ATOMIC_DEC_AND_LOCK
 #endif
 
 /*
@@ -80,10 +76,10 @@
  */
 #if (__GNUC__ > 2)
 typedef struct { } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
 #else
 typedef struct { int gcc_is_buggy; } spinlock_t;
- #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
 #endif
 
 #define spin_lock_init(lock)	do { (void)(lock); } while(0)
@@ -93,42 +89,6 @@
 #define spin_unlock_wait(lock)	do { (void)(lock); } while(0)
 #define _raw_spin_unlock(lock)	do { (void)(lock); } while(0)
 
-#elif (DEBUG_SPINLOCKS < 2)
-
-typedef struct {
-	volatile unsigned long lock;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
-#define spin_is_locked(lock)	(test_bit(0,(lock)))
-#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x)		do { (x)->lock = 1; } while (0)
-#define spin_unlock_wait(x)	do { } while (0)
-#define spin_unlock(x)		do { (x)->lock = 0; } while (0)
-
-#else /* (DEBUG_SPINLOCKS >= 2) */
-
-typedef struct {
-	volatile unsigned long lock;
-	volatile unsigned int babble;
-	const char *module;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
-
-#include <linux/kernel.h>
-
-#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
-#define spin_is_locked(lock)	(test_bit(0,(lock)))
-#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
-
-#define spin_lock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
-#define spin_unlock_wait(x)	do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
-#define spin_unlock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
-
-#endif /* DEBUG_SPINLOCKS */
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
```
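The debug fields that CONFIG_DEBUG_SPINLOCK now guards uniformly are easiest to see in isolation. Below is a hypothetical user-space rendition of the magic-number check visible in the `_raw_spin_unlock()` hunks above; SPINLOCK_MAGIC matches the kernel's value, while `BUG()` and `check_lock()` are illustrative stand-ins for the kernel's versions:

```c
#include <stdio.h>
#include <stdlib.h>

#define CONFIG_DEBUG_SPINLOCK 1		/* pretend the config option is set */
#define SPINLOCK_MAGIC 0xdead4ead	/* same value the kernel uses */
#define BUG() \
	do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
} spinlock_t;

/* Mirrors the debug preamble of _raw_spin_unlock() in the hunks above:
 * an uninitialized or corrupted lock fails the magic check immediately. */
static void check_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
#endif
}

int main(void)
{
	spinlock_t good = { 0, SPINLOCK_MAGIC };
	spinlock_t bad  = { 0, 0 };	/* "forgot" to initialize properly */

	check_lock(&good);	/* passes silently */
	check_lock(&bad);	/* trips the debug check and aborts */
	return 0;
}
```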
