summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 19:11:21 -0800
committerLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 19:11:21 -0800
commit08eb400cbebc7717f546845df7b87ad6f8711cf5 (patch)
tree1ee43fee22b320eebc4f2fb2b62c1386428baff5 /include
parent9582480a60d0be5ac47e3da2777ec828278abf7f (diff)
v2.4.6.5 -> v2.4.6.6
- me: fix more buffer head SMP races (non-x86 only - weak memory ordering) - Andrea Arcangeli: some bh cleanups from the buffer race condition fix - Tim Waugh: parport driver documentation, init sanity - Ion Badulescu: starfire net driver update - David Miller: sparc and networking updates - Ivan Kokshaysky: alpha version of the inlined rw-semaphores - NIIBE Yutaka: SuperH update
Diffstat (limited to 'include')
-rw-r--r--include/asm-alpha/atomic.h5
-rw-r--r--include/asm-alpha/rwsem.h208
-rw-r--r--include/asm-i386/atomic.h6
-rw-r--r--include/asm-ia64/atomic.h6
-rw-r--r--include/asm-m68k/atomic.h6
-rw-r--r--include/asm-mips/atomic.h6
-rw-r--r--include/asm-mips64/atomic.h6
-rw-r--r--include/asm-ppc/atomic.h5
-rw-r--r--include/asm-s390/atomic.h5
-rw-r--r--include/asm-s390x/atomic.h6
-rw-r--r--include/asm-sparc/atomic.h6
-rw-r--r--include/asm-sparc/bitops.h10
-rw-r--r--include/asm-sparc64/atomic.h6
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/if_arp.h2
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/parport.h1
-rw-r--r--include/linux/skbuff.h1
18 files changed, 282 insertions, 6 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index a509f6c74bd9..349cd5613f56 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -106,4 +106,9 @@ static __inline__ long atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
#endif /* _ALPHA_ATOMIC_H */
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
new file mode 100644
index 000000000000..681229989f98
--- /dev/null
+++ b/include/asm-alpha/rwsem.h
@@ -0,0 +1,208 @@
+#ifndef _ALPHA_RWSEM_H
+#define _ALPHA_RWSEM_H
+
+/*
+ * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
+ * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
+ */
+
+#ifndef _LINUX_RWSEM_H
+#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ long count;
+#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
+#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
+#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#if RWSEM_DEBUG
+ int debug;
+#endif
+};
+
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT , 0
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+ sem->debug = 0;
+#endif
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ long oldcount;
+#ifndef CONFIG_SMP
+ oldcount = sem->count;
+ sem->count += RWSEM_ACTIVE_READ_BIAS;
+#else
+ long temp;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " addq %0,%3,%2\n"
+ " stq_c %2,%1\n"
+ " beq %2,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+ :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+ if (__builtin_expect(oldcount < 0, 0))
+ rwsem_down_read_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ long oldcount;
+#ifndef CONFIG_SMP
+ oldcount = sem->count;
+ sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+#else
+ long temp;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " addq %0,%3,%2\n"
+ " stq_c %2,%1\n"
+ " beq %2,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+ :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+ if (__builtin_expect(oldcount, 0))
+ rwsem_down_write_failed(sem);
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long oldcount;
+#ifndef CONFIG_SMP
+ oldcount = sem->count;
+ sem->count -= RWSEM_ACTIVE_READ_BIAS;
+#else
+ long temp;
+ __asm__ __volatile__(
+ " mb\n"
+ "1: ldq_l %0,%1\n"
+ " subq %0,%3,%2\n"
+ " stq_c %2,%1\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+ :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+ if (__builtin_expect(oldcount < 0, 0))
+ if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
+ rwsem_wake(sem);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ long count;
+#ifndef CONFIG_SMP
+ sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
+ count = sem->count;
+#else
+ long temp;
+ __asm__ __volatile__(
+ " mb\n"
+ "1: ldq_l %0,%1\n"
+ " subq %0,%3,%2\n"
+ " stq_c %2,%1\n"
+ " beq %2,2f\n"
+ " subq %0,%3,%0\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (count), "=m" (sem->count), "=&r" (temp)
+ :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+ if (__builtin_expect(count, 0))
+ if ((int)count == 0)
+ rwsem_wake(sem);
+}
+
+static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+{
+#ifndef CONFIG_SMP
+ sem->count += val;
+#else
+ long temp;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " addq %0,%2,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (sem->count)
+ :"Ir" (val), "m" (sem->count));
+#endif
+}
+
+static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+{
+#ifndef CONFIG_SMP
+ sem->count += val;
+ return sem->count;
+#else
+ long ret, temp;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " addq %0,%3,%2\n"
+ " addq %0,%3,%0\n"
+ " stq_c %2,%1\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
+ :"Ir" (val), "m" (sem->count));
+
+ return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_RWSEM_H */
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 34ccfe914c36..b356b373944e 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -195,4 +195,10 @@ __asm__ __volatile__(LOCK "andl %0,%1" \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*addr) : "memory")
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 08fc25c5715f..d10f1f424b45 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -91,4 +91,10 @@ atomic_add_negative (int i, atomic_t *v)
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 4f92a2f5731a..d7ef37661179 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -49,4 +49,10 @@ static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
#define atomic_set_mask(mask, v) \
__asm__ __volatile__("orl %1,%0" : "=m" (*v) : "id" (mask),"0"(*v))
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 63667f0c81f1..b15c8b2eb297 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -272,6 +272,12 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
* Currently not implemented for MIPS.
*/
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* defined(__KERNEL__) */
#endif /* __ASM_ATOMIC_H */
diff --git a/include/asm-mips64/atomic.h b/include/asm-mips64/atomic.h
index a2804018d4af..9ed4c541926d 100644
--- a/include/asm-mips64/atomic.h
+++ b/include/asm-mips64/atomic.h
@@ -183,6 +183,12 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
* atomic_add_negative is currently not implemented for mips64.
*/
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* defined(__KERNEL__) */
#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-ppc/atomic.h b/include/asm-ppc/atomic.h
index 1cefd84efe41..a0fa988a5029 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-ppc/atomic.h
@@ -111,4 +111,9 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
#endif /* _ASM_PPC_ATOMIC_H_ */
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index d2bf9f58c8b6..d99d1aef9d71 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -170,5 +170,10 @@ if (atomic_compare_and_swap ((from), (to), (where))) {\
atomic_set(where,(to));\
}
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
#endif /* __ARCH_S390_ATOMIC __ */
diff --git a/include/asm-s390x/atomic.h b/include/asm-s390x/atomic.h
index 0d2a9348b1fd..e97e7bbe3c0b 100644
--- a/include/asm-s390x/atomic.h
+++ b/include/asm-s390x/atomic.h
@@ -170,5 +170,9 @@ if (atomic_compare_and_swap ((from), (to), (where))) {\
atomic_set(where,(to));\
}
-#endif /* __ARCH_S390_ATOMIC __ */
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+#endif /* __ARCH_S390_ATOMIC __ */
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 6c8c16bf44bb..e28e478a020b 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -100,6 +100,12 @@ static __inline__ int __atomic_sub(int i, atomic_t *v)
#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* !(__KERNEL__) */
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index b1085349015a..b3e159e77d8e 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.61 2000/09/23 02:11:22 davem Exp $
+/* $Id: bitops.h,v 1.62 2001/07/07 10:58:22 davem Exp $
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -131,7 +131,9 @@ extern __inline__ void clear_bit(unsigned long nr, volatile void *addr)
(void) test_and_clear_bit(nr, addr);
}
-extern __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
+#define test_and_change_bit(n, a) __test_and_change_bit(n, a)
+
+extern __inline__ int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -149,7 +151,9 @@ extern __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
return mask != 0;
}
-extern __inline__ void change_bit(unsigned long nr, volatile void *addr)
+#define change_bit(n, a) __change_bit(n, a)
+
+extern __inline__ void __change_bit(unsigned long nr, volatile void *addr)
{
(void) test_and_change_bit(nr, addr);
}
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 1a6cc2bfc143..76cdae611544 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -29,4 +29,10 @@ extern int __atomic_sub(int, atomic_t *);
#define atomic_inc(v) ((void)__atomic_add(1, v))
#define atomic_dec(v) ((void)__atomic_sub(1, v))
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f6866ccf6abb..6f6ef817b059 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1058,6 +1058,7 @@ extern int fs_may_remount_ro(struct super_block *);
extern int try_to_free_buffers(struct page *, unsigned int);
extern void refile_buffer(struct buffer_head * buf);
+extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
/* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ;
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 9843b91077d2..e8dc980b0ea0 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -35,10 +35,10 @@
#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
#define ARPHRD_ARCNET 7 /* ARCnet */
#define ARPHRD_APPLETLK 8 /* APPLEtalk */
-#define ARPHRD_IEEE1394 9 /* IEEE 1394 IPv4 - RFC 2734 */
#define ARPHRD_DLCI 15 /* Frame Relay DLCI */
#define ARPHRD_ATM 19 /* ATM */
#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
+#define ARPHRD_IEEE1394 24 /* IEEE 1394 IPv4 - RFC 2734 */
/* Dummy types for non ARP hardware */
#define ARPHRD_SLIP 256
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2afb52e98531..ce7f93b9e9d0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,7 +34,7 @@ struct nlmsghdr
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
-#define NLM_F_ACK 4 /* If succeed, reply with ack */
+#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 40732e005e85..8d148b6db6e9 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -22,6 +22,7 @@
#define PARPORT_DMA_NOFIFO -3
#define PARPORT_DISABLE -2
#define PARPORT_IRQ_PROBEONLY -3
+#define PARPORT_IOHI_AUTO -1
#define PARPORT_CONTROL_STROBE 0x1
#define PARPORT_CONTROL_AUTOFD 0x2
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 44fc5e05e267..09eee390b5bc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1124,6 +1124,7 @@ extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_init(void);
extern void skb_add_mtu(int mtu);