| author | David S. Miller <davem@kernel.bkbits.net> | 2004-02-13 06:09:30 -0800 |
|---|---|---|
| committer | David S. Miller <davem@kernel.bkbits.net> | 2004-02-13 06:09:30 -0800 |
| commit | 5d8691683220533b2ae46b4b63057dd2b4405d94 | |
| tree | 25727330f3f5b3c230b79cac7bfb686899d22056 /include | |
| parent | d1c0dfc84a8d1a97db4bf3fc5f67c0fa6e6cd5ec | |
| parent | 6afb3c3238a4479b1a9d6221a1bd7fd85854a68d | |
Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6
into kernel.bkbits.net:/home/davem/sparc-2.6
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-generic/local.h | 2 |
| -rw-r--r-- | include/asm-i386/atomic.h | 30 |
| -rw-r--r-- | include/asm-mips/atomic.h | 36 |
| -rw-r--r-- | include/asm-sparc/atomic.h | 97 |
| -rw-r--r-- | include/asm-sparc/dma-mapping.h | 22 |
| -rw-r--r-- | include/asm-sparc/processor.h | 1 |
| -rw-r--r-- | include/asm-sparc/semaphore.h | 12 |
| -rw-r--r-- | include/asm-sparc/system.h | 98 |
| -rw-r--r-- | include/asm-x86_64/atomic.h | 30 |
9 files changed, 145 insertions, 183 deletions
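
Note: the heart of this merge is the sparc32 atomic rewrite in include/asm-sparc/atomic.h below. The old 24-bit atomic_t gives way to a full 32-bit implementation that serializes every read-modify-write through a small, address-hashed array of spinlocks, the approach asm-parisc already used. A rough userspace sketch of that locking scheme follows, with pthread mutexes standing in for kernel spinlocks; the names here are illustrative, not taken from the patch:

```c
#include <pthread.h>

#define ATOMIC_HASH_SIZE 4

typedef struct { volatile int counter; } atomic_t;

/* One lock per hash bucket; an atomic_t's address picks its bucket,
 * so unrelated counters rarely contend on the same lock. */
static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *atomic_hash_lock(void *a)
{
	/* Same idea as ATOMIC_HASH() in the patch: hash the address. */
	return &atomic_hash[((unsigned long)a >> 8) & (ATOMIC_HASH_SIZE - 1)];
}

static int atomic_add_return(int i, atomic_t *v)
{
	pthread_mutex_t *lock = atomic_hash_lock(v);
	int ret;

	pthread_mutex_lock(lock);
	ret = (v->counter += i);	/* read-modify-write under the lock */
	pthread_mutex_unlock(lock);
	return ret;
}

int main(void)
{
	atomic_t v = { 0 };
	atomic_add_return(5, &v);		/* v.counter == 5 */
	return atomic_add_return(-5, &v);	/* returns 0 */
}
```

In the kernel the buckets are spinlocks taken with spin_lock_irqsave(), so the counters also stay safe against interrupt handlers; on uniprocessor builds the hash degenerates to a single dummy entry and only the interrupt disabling remains.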
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index 45c07dbc0a59..c814b2f840ba 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -9,7 +9,7 @@
 /* An unsigned long type for operations which are atomic for a single
  * CPU.  Usually used in combination with per-cpu variables. */
 
-#if BITS_PER_LONG == 32 && !defined(CONFIG_SPARC32)
+#if BITS_PER_LONG == 32
 /* Implement in terms of atomics. */
 
 /* Don't use typedef: don't want them to be mixed with atomic_t's. */
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 61a1aece830b..4df45c5e4b16 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -27,8 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
@@ -37,8 +36,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)		(((v)->counter) = (i))
@@ -47,8 +45,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -63,8 +60,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -81,8 +77,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -99,8 +94,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -114,8 +108,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -131,8 +124,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -151,8 +143,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -172,8 +163,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index ccecd9767cb0..69d676ddb1ab 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -29,8 +29,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)	((v)->counter)
@@ -46,8 +45,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)	((v)->counter = (i))
@@ -68,8 +66,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -85,8 +82,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -137,8 +133,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
@@ -158,8 +153,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
@@ -390,8 +384,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
@@ -412,8 +405,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -433,8 +425,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
@@ -452,8 +443,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 #define atomic_inc(v) atomic_add(1,(v))
@@ -469,8 +459,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  * atomic_dec - decrement and test
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 #define atomic_dec(v) atomic_sub(1,(v))
@@ -489,8 +478,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index f42ba9526265..873f806fc678 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -2,21 +2,82 @@
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ *
+ * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
+ * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
  */
 
 #ifndef __ARCH_SPARC_ATOMIC__
 #define __ARCH_SPARC_ATOMIC__
 
 #include <linux/config.h>
+#include <linux/spinlock.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
-#ifndef CONFIG_SMP
+
+#ifdef CONFIG_SMP
+
+#define ATOMIC_HASH_SIZE	4
+#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
+
+#else /* SMP */
+
+#define ATOMIC_HASH_SIZE	1
+#define ATOMIC_HASH(a)		0
+
+#endif /* SMP */
+
+static inline int __atomic_add_return(int i, atomic_t *v)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	ret = (v->counter += i);
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	v->counter = i;
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
 
 #define ATOMIC_INIT(i)  { (i) }
+
 #define atomic_read(v)          ((v)->counter)
-#define atomic_set(v, i)        (((v)->counter) = i)
+
+#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
+#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))
+
+#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
+#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
+#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))
+
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+
+/* This is the old 24-bit implementation.  It's still used internally
+ * by some sparc-specific code, notably the semaphore implementation.
+ */
+typedef struct { volatile int counter; } atomic24_t;
+
+#ifndef CONFIG_SMP
+
+#define ATOMIC24_INIT(i)  { (i) }
+#define atomic24_read(v)          ((v)->counter)
+#define atomic24_set(v, i)        (((v)->counter) = i)
 
 #else
 /* We do the bulk of the actual work out of line in two common
@@ -33,9 +94,9 @@ typedef struct { volatile int counter; } atomic_t;
  *	31                          8 7      0
  */
 
-#define ATOMIC_INIT(i)	{ ((i) << 8) }
+#define ATOMIC24_INIT(i)	{ ((i) << 8) }
 
-static __inline__ int atomic_read(const atomic_t *v)
+static inline int atomic24_read(const atomic24_t *v)
 {
 	int ret = v->counter;
@@ -45,10 +106,10 @@ static __inline__ int atomic_read(const atomic_t *v)
 	return ret >> 8;
 }
 
-#define atomic_set(v, i)	(((v)->counter) = ((i) << 8))
+#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
 #endif
 
-static inline int __atomic_add(int i, atomic_t *v)
+static inline int __atomic24_add(int i, atomic24_t *v)
 {
 	register volatile int *ptr asm("g1");
 	register int increment asm("g2");
@@ -61,7 +122,7 @@ static inline int __atomic_add(int i, atomic_t *v)
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_add\n\t"
+	"call	___atomic24_add\n\t"
 	" add	%%o7, 8, %%o7\n"
 	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
 	: "0" (increment), "r" (ptr)
@@ -70,7 +131,7 @@
 	return increment;
 }
 
-static inline int __atomic_sub(int i, atomic_t *v)
+static inline int __atomic24_sub(int i, atomic24_t *v)
 {
 	register volatile int *ptr asm("g1");
 	register int increment asm("g2");
@@ -83,7 +144,7 @@
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n"
 	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
 	: "0" (increment), "r" (ptr)
@@ -92,19 +153,19 @@
 	return increment;
 }
 
-#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
-#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))
+#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
+#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
 
-#define atomic_dec_return(v) __atomic_sub(1, (v))
-#define atomic_inc_return(v) __atomic_add(1, (v))
+#define atomic24_dec_return(v) __atomic24_sub(1, (v))
+#define atomic24_inc_return(v) __atomic24_add(1, (v))
 
-#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
-#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)
+#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
+#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
 
-#define atomic_inc(v) ((void)__atomic_add(1, (v)))
-#define atomic_dec(v) ((void)__atomic_sub(1, (v)))
+#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
+#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
 
-#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
+#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
index 779cfef77d00..2dc5bb8effa6 100644
--- a/include/asm-sparc/dma-mapping.h
+++ b/include/asm-sparc/dma-mapping.h
@@ -1,5 +1,25 @@
+#ifndef _ASM_SPARC_DMA_MAPPING_H
+#define _ASM_SPARC_DMA_MAPPING_H
+
 #include <linux/config.h>
 
 #ifdef CONFIG_PCI
 #include <asm-generic/dma-mapping.h>
-#endif
+#else
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, int flag)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	BUG();
+}
+
+#endif /* PCI */
+
+#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index b0c5a0d09204..0a9a4f5bc585 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -22,7 +22,6 @@
 #include <asm/segment.h>
 #include <asm/btfixup.h>
 #include <asm/page.h>
-#include <asm/atomic.h>
 
 /*
  * Bus types
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 0e6122ae3058..b3b16d121ace 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -10,7 +10,7 @@
 #include <linux/rwsem.h>
 
 struct semaphore {
-	atomic_t count;
+	atomic24_t count;
 	int sleepers;
 	wait_queue_head_t wait;
 #if WAITQUEUE_DEBUG
@@ -40,7 +40,7 @@ struct semaphore {
 static inline void sema_init (struct semaphore *sem, int val)
 {
-	atomic_set(&sem->count, val);
+	atomic24_set(&sem->count, val);
 	sem->sleepers = 0;
 	init_waitqueue_head(&sem->wait);
 #if WAITQUEUE_DEBUG
@@ -78,7 +78,7 @@ static inline void down(struct semaphore * sem)
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -115,7 +115,7 @@ static inline int down_interruptible(struct semaphore * sem)
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_sub\n\t"
+	"call	___atomic24_sub\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"bl	2f\n\t"
@@ -193,7 +193,7 @@ static inline void up(struct semaphore * sem)
 
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
-	"call	___atomic_add\n\t"
+	"call	___atomic24_add\n\t"
 	" add	%%o7, 8, %%o7\n\t"
 	"tst	%%g2\n\t"
 	"ble	2f\n\t"
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index b53cf2c6897e..a4c7d566e075 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -171,32 +171,11 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 /*
  * Changing the IRQ level on the Sparc.
  */
-extern __inline__ void setipl(unsigned long __orig_psr)
-{
-	__asm__ __volatile__(
-		"wr	%0, 0x0, %%psr\n\t"
-		"nop; nop; nop\n"
-		: /* no outputs */
-		: "r" (__orig_psr)
-		: "memory", "cc");
-}
+extern void local_irq_restore(unsigned long);
+extern unsigned long __local_irq_save(void);
+extern void local_irq_enable(void);
 
-extern __inline__ void local_irq_enable(void)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"andn	%0, %1, %0\n\t"
-		"wr	%0, 0x0, %%psr\n\t"
-		"nop; nop; nop\n"
-		: "=r" (tmp)
-		: "i" (PSR_PIL)
-		: "memory");
-}
-
-extern __inline__ unsigned long getipl(void)
+static inline unsigned long getipl(void)
 {
 	unsigned long retval;
 
@@ -204,76 +183,11 @@ extern __inline__ unsigned long getipl(void)
 	return retval;
 }
 
-#if 0 /* not used */
-extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
-{
-	unsigned long retval;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"and	%0, %2, %%g1\n\t"
-		"and	%1, %2, %%g2\n\t"
-		"xorcc	%%g1, %%g2, %%g0\n\t"
-		"be	1f\n\t"
-		" nop\n\t"
-		"wr	%0, %2, %%psr\n\t"
-		"nop; nop; nop;\n"
-		"1:\n"
-		: "=&r" (retval)
-		: "r" (__new_psr), "i" (PSR_PIL)
-		: "g1", "g2", "memory", "cc");
-
-	return retval;
-}
-#endif
-
-extern __inline__ unsigned long read_psr_and_cli(void)
-{
-	unsigned long retval;
-
-	__asm__ __volatile__(
-		"rd	%%psr, %0\n\t"
-		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
-		"or	%0, %1, %%g1\n\t"
-		"wr	%%g1, 0x0, %%psr\n\t"
-		"nop; nop; nop\n\t"
-		: "=r" (retval)
-		: "i" (PSR_PIL)
-		: "g1", "memory");
-
-	return retval;
-}
-
 #define local_save_flags(flags)	((flags) = getipl())
-#define local_irq_save(flags)	((flags) = read_psr_and_cli())
-#define local_irq_restore(flags)	setipl((flags))
-#define local_irq_disable()	((void) read_psr_and_cli())
-
+#define local_irq_save(flags)	((flags) = __local_irq_save())
+#define local_irq_disable()	((void) __local_irq_save())
 #define irqs_disabled()		((getipl() & PSR_PIL) != 0)
 
-#ifdef CONFIG_SMP
-
-extern unsigned char global_irq_holder;
-
-#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long flags);
-#define cli()			__global_cli()
-#define sti()			__global_sti()
-#define save_flags(flags)	((flags)=__global_save_flags())
-#define restore_flags(flags)	__global_restore_flags(flags)
-
-#else
-
-#define cli()			local_irq_disable()
-#define sti()			local_irq_enable()
-
-#endif
-
 /* XXX Change this if we ever use a PSO mode kernel. */
 #define mb()	__asm__ __volatile__ ("" : : : "memory")
 #define rmb()	mb()
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index fb8d4f54d3d0..baf472fb50f2 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -29,8 +29,7 @@ typedef struct { volatile int counter; } atomic_t;
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
@@ -39,8 +38,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v,i)		(((v)->counter) = (i))
@@ -49,8 +47,7 @@ typedef struct { volatile int counter; } atomic_t;
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v.  Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
@@ -65,8 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
@@ -83,8 +79,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
@@ -101,8 +96,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
@@ -116,8 +110,7 @@ static __inline__ void atomic_inc(atomic_t *v)
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
@@ -133,8 +126,7 @@ static __inline__ void atomic_dec(atomic_t *v)
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
@@ -153,8 +145,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
@@ -174,8 +165,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
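
Note: the layout the legacy atomic24_t keeps using packs the counter into bits 31..8 and reserves the low byte for the lock manipulated by the out-of-line ___atomic24_add/___atomic24_sub helpers, which is why ATOMIC24_INIT() shifts left by 8 and atomic24_read() shifts back down. A minimal sketch of just that packing (the SMP lock-byte handling and the asm helpers are omitted; main() is only a usage check, and the read relies on gcc's arithmetic right shift for negative values):

```c
#include <assert.h>

/* Counter in bits 31..8, lock byte in bits 7..0 -- mirrors the
 * ATOMIC24_INIT()/atomic24_read() pair from the sparc header. */
typedef struct { volatile int counter; } atomic24_t;

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static int atomic24_read(const atomic24_t *v)
{
	/* Arithmetic right shift drops the lock byte and sign-extends,
	 * so negative 24-bit values read back correctly. */
	return v->counter >> 8;
}

static void atomic24_set(atomic24_t *v, int i)
{
	v->counter = i << 8;	/* low byte (the lock) left clear */
}

int main(void)
{
	atomic24_t a = ATOMIC24_INIT(-5);
	assert(atomic24_read(&a) == -5);
	atomic24_set(&a, 1000);
	assert(atomic24_read(&a) == 1000);
	return 0;
}
```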
